diff --git a/src/i18n/locales/en/chat.json b/src/i18n/locales/en/chat.json
index b2b83f73d..437dc0d1d 100644
--- a/src/i18n/locales/en/chat.json
+++ b/src/i18n/locales/en/chat.json
@@ -13,8 +13,6 @@
"noLogs": "(No logs available yet)",
"toolbar": {
"refresh": "Refresh chat",
- "showThinking": "Show thinking",
- "hideThinking": "Hide thinking",
"currentAgent": "Talking to {{agent}}"
},
"taskPanel": {
@@ -34,19 +32,12 @@
}
},
"executionGraph": {
- "eyebrow": "Conversation Run",
"title": "Execution Graph",
- "status": {
- "active": "Active",
- "latest": "Latest",
- "previous": "Previous"
- },
"branchLabel": "branch",
- "userTrigger": "User Trigger",
- "userTriggerHint": "Triggered by the user message above",
+ "thinkingLabel": "Thinking",
"agentRun": "{{agent}} execution",
- "agentReply": "Assistant Reply",
- "agentReplyHint": "Resolved in the assistant reply below"
+ "collapsedSummary": "{{toolCount}} tool calls · {{processCount}} process messages",
+ "collapseAction": "Collapse execution graph"
},
"composer": {
"attachFiles": "Attach files",
diff --git a/src/i18n/locales/ja/chat.json b/src/i18n/locales/ja/chat.json
index 96584b225..d6f01557e 100644
--- a/src/i18n/locales/ja/chat.json
+++ b/src/i18n/locales/ja/chat.json
@@ -13,8 +13,6 @@
"noLogs": "(ログはまだありません)",
"toolbar": {
"refresh": "チャットを更新",
- "showThinking": "思考を表示",
- "hideThinking": "思考を非表示",
"currentAgent": "現在の会話相手: {{agent}}"
},
"taskPanel": {
@@ -34,19 +32,12 @@
}
},
"executionGraph": {
- "eyebrow": "会話実行",
"title": "実行グラフ",
- "status": {
- "active": "進行中",
- "latest": "直近",
- "previous": "履歴"
- },
"branchLabel": "branch",
- "userTrigger": "ユーザー入力",
- "userTriggerHint": "上のユーザーメッセージがトリガーです",
+ "thinkingLabel": "考え中",
"agentRun": "{{agent}} の実行",
- "agentReply": "アシスタント返信",
- "agentReplyHint": "結果は下のアシスタント返信に反映されます"
+ "collapsedSummary": "ツール呼び出し {{toolCount}} 件 · プロセスメッセージ {{processCount}} 件",
+ "collapseAction": "実行グラフを折りたたむ"
},
"composer": {
"attachFiles": "ファイルを添付",
diff --git a/src/i18n/locales/ru/chat.json b/src/i18n/locales/ru/chat.json
index 949d0c731..d964ece26 100644
--- a/src/i18n/locales/ru/chat.json
+++ b/src/i18n/locales/ru/chat.json
@@ -13,8 +13,6 @@
"noLogs": "(Журналы ещё недоступны)",
"toolbar": {
"refresh": "Обновить чат",
- "showThinking": "Показать размышления",
- "hideThinking": "Скрыть размышления",
"currentAgent": "Общение с {{agent}}"
},
"taskPanel": {
@@ -34,19 +32,12 @@
}
},
"executionGraph": {
- "eyebrow": "Выполнение в чате",
"title": "Граф выполнения",
- "status": {
- "active": "Активно",
- "latest": "Последнее",
- "previous": "Предыдущее"
- },
"branchLabel": "ветвь",
- "userTrigger": "Триггер пользователя",
- "userTriggerHint": "Запущен пользовательским сообщением выше",
+ "thinkingLabel": "Думаю",
"agentRun": "Выполнение {{agent}}",
- "agentReply": "Ответ ассистента",
- "agentReplyHint": "Разрешено в ответе ассистента ниже"
+ "collapsedSummary": "Вызовов инструментов: {{toolCount}} · Промежуточных сообщений: {{processCount}}",
+ "collapseAction": "Свернуть граф выполнения"
},
"composer": {
"attachFiles": "Прикрепить файлы",
diff --git a/src/i18n/locales/zh/chat.json b/src/i18n/locales/zh/chat.json
index 697c08df8..88ea7f963 100644
--- a/src/i18n/locales/zh/chat.json
+++ b/src/i18n/locales/zh/chat.json
@@ -13,8 +13,6 @@
"noLogs": "(暂无日志)",
"toolbar": {
"refresh": "刷新聊天",
- "showThinking": "显示思考过程",
- "hideThinking": "隐藏思考过程",
"currentAgent": "当前对话对象:{{agent}}"
},
"taskPanel": {
@@ -34,19 +32,12 @@
}
},
"executionGraph": {
- "eyebrow": "对话执行",
"title": "执行关系图",
- "status": {
- "active": "执行中",
- "latest": "最近一次",
- "previous": "历史"
- },
"branchLabel": "分支",
- "userTrigger": "用户触发",
- "userTriggerHint": "对应上方这条用户消息",
+ "thinkingLabel": "思考中",
"agentRun": "{{agent}} 执行",
- "agentReply": "助手回复",
- "agentReplyHint": "结果体现在下方这条助手回复里"
+ "collapsedSummary": "{{toolCount}} 个工具调用,{{processCount}} 条过程消息",
+ "collapseAction": "收起执行关系图"
},
"composer": {
"attachFiles": "添加文件",
diff --git a/src/pages/Chat/ChatInput.tsx b/src/pages/Chat/ChatInput.tsx
index dbe8307eb..c1a479d81 100644
--- a/src/pages/Chat/ChatInput.tsx
+++ b/src/pages/Chat/ChatInput.tsx
@@ -114,7 +114,7 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
useEffect(() => {
if (textareaRef.current) {
textareaRef.current.style.height = 'auto';
- textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 200)}px`;
+ textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 240)}px`;
}
}, [input]);
@@ -407,33 +407,54 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
)}
- {/* Input Row */}
-
+ {/* Input Container */}
+
{selectedTarget && (
-
+
)}
-
+ {/* Text Row — flush-left */}
+
);
}
diff --git a/src/pages/Chat/ExecutionGraphCard.tsx b/src/pages/Chat/ExecutionGraphCard.tsx
index 607b5b40f..17e3d4c55 100644
--- a/src/pages/Chat/ExecutionGraphCard.tsx
+++ b/src/pages/Chat/ExecutionGraphCard.tsx
@@ -1,16 +1,32 @@
import { useState } from 'react';
-import { ArrowDown, ArrowUp, Bot, CheckCircle2, ChevronDown, ChevronRight, CircleDashed, GitBranch, Sparkles, Wrench, XCircle } from 'lucide-react';
+import { CheckCircle2, ChevronDown, ChevronRight, CircleDashed, GitBranch, MessageSquare, Wrench, XCircle } from 'lucide-react';
import { useTranslation } from 'react-i18next';
import { cn } from '@/lib/utils';
import type { TaskStep } from './task-visualization';
interface ExecutionGraphCardProps {
agentLabel: string;
- sessionLabel: string;
steps: TaskStep[];
active: boolean;
- onJumpToTrigger?: () => void;
- onJumpToReply?: () => void;
+ /**
+ * When provided, the card becomes fully controlled: the parent owns the
+ * expand state (e.g. to persist across remounts) and toggling goes through
+ * `onExpandedChange`. When omitted, the card manages its own local state.
+ */
+ expanded?: boolean;
+ onExpandedChange?: (expanded: boolean) => void;
+}
+
+const TOOL_ROW_EXTRA_INDENT_PX = 8;
+
+function AnimatedDots({ className }: { className?: string }) {
+ return (
+
+ .
+ .
+ .
+
+ );
}
function GraphStatusIcon({ status }: { status: TaskStep['status'] }) {
@@ -23,45 +39,107 @@ function StepDetailCard({ step }: { step: TaskStep }) {
const { t } = useTranslation('chat');
const [expanded, setExpanded] = useState(false);
const hasDetail = !!step.detail;
+ // Narration steps (intermediate pure-text assistant messages folded from
+ // the chat stream) are rendered without a label/status pill: the message
+ // text IS the primary content.
+ const isNarration = step.kind === 'message';
+ const isTool = step.kind === 'tool';
+ const isThinking = step.kind === 'thinking';
+ const showRunningDots = isTool && step.status === 'running';
+ const hideStatusText = isTool && step.status === 'completed';
+ const detailPreview = step.detail?.replace(/\s+/g, ' ').trim();
+ const canExpand = hasDetail;
+ const usePlainExpandedDetail = isTool || isThinking;
+ const displayLabel = isThinking ? t('executionGraph.thinkingLabel') : step.label;
return (
-
+
- {step.detail && expanded && (
+ {step.detail && expanded && canExpand && (
+ usePlainExpandedDetail ? (
+
+ {step.detail}
+
+ ) : (
+ )
)}
);
@@ -69,118 +147,147 @@ function StepDetailCard({ step }: { step: TaskStep }) {
export function ExecutionGraphCard({
agentLabel,
- sessionLabel,
steps,
active,
- onJumpToTrigger,
- onJumpToReply,
+ expanded: controlledExpanded,
+ onExpandedChange,
}: ExecutionGraphCardProps) {
const { t } = useTranslation('chat');
+ // Active runs should stay expanded by default so the user can follow the
+ // execution live. Once the run completes, the default state returns to
+ // collapsed. Explicit user toggles remain controlled by the parent override.
+ const [uncontrolledExpanded, setUncontrolledExpanded] = useState(active);
+ const [prevActive, setPrevActive] = useState(active);
+ if (prevActive !== active) {
+ setPrevActive(active);
+ if (controlledExpanded == null && uncontrolledExpanded !== active) {
+ setUncontrolledExpanded(active);
+ }
+ }
+
+ const isControlled = controlledExpanded != null;
+ const expanded = isControlled ? controlledExpanded : uncontrolledExpanded;
+ const setExpanded = (next: boolean) => {
+ if (!isControlled) setUncontrolledExpanded(next);
+ onExpandedChange?.(next);
+ };
+
+ const toolCount = steps.filter((step) => step.kind === 'tool').length;
+ const processCount = steps.length - toolCount;
+ const shouldShowTrailingThinking = active;
+
+ if (!expanded) {
+ return (
+
+ );
+ }
+
return (
-
-
-
- {t('executionGraph.eyebrow')}
-
-
{t('executionGraph.title')}
-
- {agentLabel} · {sessionLabel}
-
-
-
- {active ? t('executionGraph.status.active') : t('executionGraph.status.previous')}
-
-
+
-
-
-
-
-
-
-
-
-
+
+
+
-
-
-
- {t('executionGraph.agentRun', { agent: agentLabel })}
-
+
+
+ {t('executionGraph.agentRun', { agent: agentLabel })}
+
- {steps.map((step, index) => (
-
+ {steps.map((step) => {
+ const alignedIndentOffset = (
+ step.kind === 'tool'
+ || step.kind === 'message'
+ || step.kind === 'thinking'
+ ) ? TOOL_ROW_EXTRA_INDENT_PX : 0;
+ const rowMarginLeft = (Math.max(step.depth - 1, 0) * 24) + alignedIndentOffset;
+ return (
+
-
+
{step.depth > 1 && (
-
+
)}
- {step.kind === 'thinking' ?
: step.kind === 'tool' ?
:
}
+ {step.kind === 'thinking'
+ ?
+ : step.kind === 'tool'
+ ?
+ : step.kind === 'message'
+ ?
+ :
}
- {index === steps.length - 1 && (
- <>
-
-
- >
- )}
- ))}
+ )})}
+ {shouldShowTrailingThinking && (
+
+
+
+
+
+
{t('executionGraph.thinkingLabel')}
+
+
+
+
+ )}
);
diff --git a/src/pages/Chat/index.tsx b/src/pages/Chat/index.tsx
index e7c9d47ef..1d512b2d0 100644
--- a/src/pages/Chat/index.tsx
+++ b/src/pages/Chat/index.tsx
@@ -4,7 +4,7 @@
* via gateway:rpc IPC. Session selector, thinking toggle, and refresh
* are in the toolbar; messages render with markdown + streaming.
*/
-import { useEffect, useState } from 'react';
+import { useEffect, useMemo, useState } from 'react';
import { AlertCircle, Loader2, Sparkles } from 'lucide-react';
import { useChatStore, type RawMessage } from '@/stores/chat';
import { useGatewayStore } from '@/stores/gateway';
@@ -15,13 +15,46 @@ import { ChatMessage } from './ChatMessage';
import { ChatInput } from './ChatInput';
import { ExecutionGraphCard } from './ExecutionGraphCard';
import { ChatToolbar } from './ChatToolbar';
-import { extractImages, extractText, extractThinking, extractToolUse } from './message-utils';
-import { deriveTaskSteps, parseSubagentCompletionInfo } from './task-visualization';
+import { extractImages, extractText, extractThinking, extractToolUse, stripProcessMessagePrefix } from './message-utils';
+import { deriveTaskSteps, findReplyMessageIndex, parseSubagentCompletionInfo, type TaskStep } from './task-visualization';
import { useTranslation } from 'react-i18next';
import { cn } from '@/lib/utils';
import { useStickToBottomInstant } from '@/hooks/use-stick-to-bottom-instant';
import { useMinLoading } from '@/hooks/use-min-loading';
+type GraphStepCacheEntry = {
+ steps: ReturnType
;
+ agentLabel: string;
+ sessionLabel: string;
+ segmentEnd: number;
+ replyIndex: number | null;
+ triggerIndex: number;
+};
+
+type UserRunCard = {
+ triggerIndex: number;
+ replyIndex: number | null;
+ active: boolean;
+ agentLabel: string;
+ sessionLabel: string;
+ segmentEnd: number;
+ steps: TaskStep[];
+ messageStepTexts: string[];
+ streamingReplyText: string | null;
+};
+
+function getPrimaryMessageStepTexts(steps: TaskStep[]): string[] {
+ return steps
+ .filter((step) => step.kind === 'message' && step.parentId === 'agent-run' && !!step.detail)
+ .map((step) => step.detail!);
+}
+
+// Keep the last non-empty execution-graph snapshot per session/run outside
+// React state so `loadHistory` refreshes can still fall back to the previous
+// steps without tripping React's set-state-in-effect lint rule.
+const graphStepCacheStore = new Map>();
+const streamingTimestampStore = new Map();
+
export function Chat() {
const { t } = useTranslation('chat');
const gatewayStatus = useGatewayStore((s) => s.status);
@@ -34,7 +67,6 @@ export function Chat() {
const loading = useChatStore((s) => s.loading);
const sending = useChatStore((s) => s.sending);
const error = useChatStore((s) => s.error);
- const showThinking = useChatStore((s) => s.showThinking);
const streamingMessage = useChatStore((s) => s.streamingMessage);
const streamingTools = useChatStore((s) => s.streamingTools);
const pendingFinal = useChatStore((s) => s.pendingFinal);
@@ -46,8 +78,14 @@ export function Chat() {
const cleanupEmptySession = useChatStore((s) => s.cleanupEmptySession);
const [childTranscripts, setChildTranscripts] = useState>({});
-
- const [streamingTimestamp, setStreamingTimestamp] = useState(0);
+ // Persistent per-run override for the Execution Graph's expanded/collapsed
+ // state. Keyed by a stable run id (trigger message id, or a fallback of
+ // `${sessionKey}:${triggerIdx}`) so user toggles survive the `loadHistory`
+ // refresh that runs after every final event — otherwise the card would
+ // remount and reset. `undefined` values mean "user hasn't toggled, let the
+ // card pick a default from its own `active` prop."
+ const [graphExpandedOverrides, setGraphExpandedOverrides] = useState>({});
+ const graphStepCache: Record = graphStepCacheStore.get(currentSessionKey) ?? {};
const minLoading = useMinLoading(loading && messages.length > 0);
const { contentRef, scrollRef } = useStickToBottomInstant(currentSessionKey);
@@ -117,30 +155,33 @@ export function Chat() {
};
}, [messages, childTranscripts]);
- // Update timestamp when sending starts
- useEffect(() => {
- if (sending && streamingTimestamp === 0) {
- // eslint-disable-next-line react-hooks/set-state-in-effect
- setStreamingTimestamp(Date.now() / 1000);
- } else if (!sending && streamingTimestamp !== 0) {
- setStreamingTimestamp(0);
- }
- }, [sending, streamingTimestamp]);
-
- // Gateway not running block has been completely removed so the UI always renders.
-
const streamMsg = streamingMessage && typeof streamingMessage === 'object'
? streamingMessage as unknown as { role?: string; content?: unknown; timestamp?: number }
: null;
+ const streamTimestamp = typeof streamMsg?.timestamp === 'number' ? streamMsg.timestamp : 0;
+ useEffect(() => {
+ if (!sending) {
+ streamingTimestampStore.delete(currentSessionKey);
+ return;
+ }
+ if (!streamingTimestampStore.has(currentSessionKey)) {
+ streamingTimestampStore.set(currentSessionKey, streamTimestamp || Date.now() / 1000);
+ }
+ }, [currentSessionKey, sending, streamTimestamp]);
+
+ const streamingTimestamp = sending
+ ? (streamingTimestampStore.get(currentSessionKey) ?? streamTimestamp)
+ : 0;
const streamText = streamMsg ? extractText(streamMsg) : (typeof streamingMessage === 'string' ? streamingMessage : '');
const hasStreamText = streamText.trim().length > 0;
const streamThinking = streamMsg ? extractThinking(streamMsg) : null;
- const hasStreamThinking = showThinking && !!streamThinking && streamThinking.trim().length > 0;
+ const hasStreamThinking = !!streamThinking && streamThinking.trim().length > 0;
const streamTools = streamMsg ? extractToolUse(streamMsg) : [];
const hasStreamTools = streamTools.length > 0;
const streamImages = streamMsg ? extractImages(streamMsg) : [];
const hasStreamImages = streamImages.length > 0;
const hasStreamToolStatus = streamingTools.length > 0;
+ const hasRunningStreamToolStatus = streamingTools.some((tool) => tool.status === 'running');
const shouldRenderStreaming = sending && (hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus);
const hasAnyStreamContent = hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus;
@@ -155,76 +196,236 @@ export function Chat() {
}
}
- const userRunCards = messages.flatMap((message, idx) => {
+ // Indices of intermediate assistant process messages that are represented
+ // in the ExecutionGraphCard (narration text and/or thinking). We suppress
+ // them from the chat stream so they don't appear duplicated below the graph.
+ const foldedNarrationIndices = new Set();
+
+ const userRunCards: UserRunCard[] = messages.flatMap((message, idx) => {
if (message.role !== 'user' || subagentCompletionInfos[idx]) return [];
+ const runKey = message.id
+ ? `msg-${message.id}`
+ : `${currentSessionKey}:trigger-${idx}`;
const nextUserIndex = nextUserMessageIndexes[idx];
const segmentEnd = nextUserIndex === -1 ? messages.length : nextUserIndex;
const segmentMessages = messages.slice(idx + 1, segmentEnd);
- const replyIndexOffset = segmentMessages.findIndex((candidate) => candidate.role === 'assistant');
- const replyIndex = replyIndexOffset === -1 ? null : idx + 1 + replyIndexOffset;
const completionInfos = subagentCompletionInfos
.slice(idx + 1, segmentEnd)
.filter((value): value is NonNullable => value != null);
const isLatestOpenRun = nextUserIndex === -1 && (sending || pendingFinal || hasAnyStreamContent);
- let steps = deriveTaskSteps({
- messages: segmentMessages,
- streamingMessage: isLatestOpenRun ? streamingMessage : null,
- streamingTools: isLatestOpenRun ? streamingTools : [],
- sending: isLatestOpenRun ? sending : false,
- pendingFinal: isLatestOpenRun ? pendingFinal : false,
- showThinking,
- });
+ const replyIndexOffset = findReplyMessageIndex(segmentMessages, isLatestOpenRun);
+ const replyIndex = replyIndexOffset === -1 ? null : idx + 1 + replyIndexOffset;
- for (const completion of completionInfos) {
- const childMessages = childTranscripts[completion.sessionId];
- if (!childMessages || childMessages.length === 0) continue;
- const branchRootId = `subagent:${completion.sessionId}`;
- const childSteps = deriveTaskSteps({
- messages: childMessages,
- streamingMessage: null,
- streamingTools: [],
- sending: false,
- pendingFinal: false,
- showThinking,
- }).map((step) => ({
- ...step,
- id: `${completion.sessionId}:${step.id}`,
- depth: step.depth + 1,
- parentId: branchRootId,
- }));
+ const buildSteps = (omitLastStreamingMessageSegment: boolean): TaskStep[] => {
+ let builtSteps = deriveTaskSteps({
+ messages: segmentMessages,
+ streamingMessage: isLatestOpenRun ? streamingMessage : null,
+ streamingTools: isLatestOpenRun ? streamingTools : [],
+ omitLastStreamingMessageSegment: isLatestOpenRun ? omitLastStreamingMessageSegment : false,
+ });
- steps = [
- ...steps,
- {
- id: branchRootId,
- label: `${completion.agentId} subagent`,
- status: 'completed',
- kind: 'system' as const,
- detail: completion.sessionKey,
- depth: 1,
- parentId: 'agent-run',
- },
- ...childSteps,
- ];
+ for (const completion of completionInfos) {
+ const childMessages = childTranscripts[completion.sessionId];
+ if (!childMessages || childMessages.length === 0) continue;
+ const branchRootId = `subagent:${completion.sessionId}`;
+ const childSteps = deriveTaskSteps({
+ messages: childMessages,
+ streamingMessage: null,
+ streamingTools: [],
+ }).map((step) => ({
+ ...step,
+ id: `${completion.sessionId}:${step.id}`,
+ depth: step.depth + 1,
+ parentId: branchRootId,
+ }));
+
+ builtSteps = [
+ ...builtSteps,
+ {
+ id: branchRootId,
+ label: `${completion.agentId} subagent`,
+ status: 'completed',
+ kind: 'system' as const,
+ detail: completion.sessionKey,
+ depth: 1,
+ parentId: 'agent-run',
+ },
+ ...childSteps,
+ ];
+ }
+
+ return builtSteps;
+ };
+
+ const rawStreamingReplyCandidate = isLatestOpenRun
+ && pendingFinal
+ && (hasStreamText || hasStreamImages)
+ && streamTools.length === 0
+ && !hasRunningStreamToolStatus;
+
+ let steps = buildSteps(rawStreamingReplyCandidate);
+ let streamingReplyText: string | null = null;
+ if (rawStreamingReplyCandidate) {
+ const trimmedReplyText = stripProcessMessagePrefix(streamText, getPrimaryMessageStepTexts(steps));
+ const hasReplyText = trimmedReplyText.trim().length > 0;
+ if (hasReplyText || hasStreamImages) {
+ streamingReplyText = trimmedReplyText;
+ } else {
+ steps = buildSteps(false);
+ }
}
- if (steps.length === 0) return [];
-
const segmentAgentId = currentAgentId;
const segmentAgentLabel = agents.find((agent) => agent.id === segmentAgentId)?.name || segmentAgentId;
const segmentSessionLabel = sessionLabels[currentSessionKey] || currentSessionKey;
+ if (steps.length === 0) {
+ if (isLatestOpenRun && streamingReplyText == null) {
+ return [{
+ triggerIndex: idx,
+ replyIndex,
+ active: true,
+ agentLabel: segmentAgentLabel,
+ sessionLabel: segmentSessionLabel,
+ segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
+ steps: [],
+ messageStepTexts: [],
+ streamingReplyText: null,
+ }];
+ }
+ const cached = graphStepCache[runKey];
+ if (!cached) return [];
+ return [{
+ triggerIndex: idx,
+ replyIndex: cached.replyIndex,
+ active: false,
+ agentLabel: cached.agentLabel,
+ sessionLabel: cached.sessionLabel,
+ segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
+ steps: cached.steps,
+ messageStepTexts: getPrimaryMessageStepTexts(cached.steps),
+ streamingReplyText: null,
+ }];
+ }
+
+ // Mark intermediate assistant messages whose process output should be folded into
+ // the ExecutionGraphCard. We fold the text regardless of whether the
+ // message ALSO carries tool calls (mixed `text + toolCall` messages are
+ // common — e.g. "waiting for the page to load…" followed by a `wait`
+ // tool call). This prevents orphan narration bubbles from leaking into
+ // the chat stream once the graph is collapsed.
+ //
+ // When the run is still streaming (`isLatestOpenRun`) the final reply is
+ // not yet part of `segmentMessages`, so every assistant message in the
+ // segment counts as intermediate. For completed runs, we preserve the
+ // final reply bubble by skipping the message that `findReplyMessageIndex`
+ // identifies as the answer.
+ const segmentReplyOffset = findReplyMessageIndex(segmentMessages, isLatestOpenRun);
+ for (let offset = 0; offset < segmentMessages.length; offset += 1) {
+ if (offset === segmentReplyOffset) continue;
+ const candidate = segmentMessages[offset];
+ if (!candidate || candidate.role !== 'assistant') continue;
+ const hasNarrationText = extractText(candidate).trim().length > 0;
+ const hasThinking = !!extractThinking(candidate);
+ if (!hasNarrationText && !hasThinking) continue;
+ foldedNarrationIndices.add(idx + 1 + offset);
+ }
+
return [{
triggerIndex: idx,
replyIndex,
- active: isLatestOpenRun,
+ active: isLatestOpenRun && streamingReplyText == null,
agentLabel: segmentAgentLabel,
sessionLabel: segmentSessionLabel,
segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
steps,
+ messageStepTexts: getPrimaryMessageStepTexts(steps),
+ streamingReplyText,
}];
});
+ const hasActiveExecutionGraph = userRunCards.some((card) => card.active);
+ const replyTextOverrides = new Map();
+ for (const card of userRunCards) {
+ if (card.replyIndex == null) continue;
+ const replyMessage = messages[card.replyIndex];
+ if (!replyMessage || replyMessage.role !== 'assistant') continue;
+ const fullReplyText = extractText(replyMessage);
+ const trimmedReplyText = stripProcessMessagePrefix(fullReplyText, card.messageStepTexts);
+ if (trimmedReplyText !== fullReplyText) {
+ replyTextOverrides.set(card.replyIndex, trimmedReplyText);
+ }
+ }
+ const streamingReplyText = userRunCards.find((card) => card.streamingReplyText != null)?.streamingReplyText ?? null;
+
+ // Derive the set of run keys that should be auto-collapsed (run finished
+ // streaming or has a reply override) during render instead of in an effect,
+ // so we don't violate react-hooks/set-state-in-effect. Explicit user toggles
+ // still win via `graphExpandedOverrides` and are merged in at the call site.
+ const autoCollapsedRunKeys = useMemo(() => {
+ const keys = new Set();
+ for (const card of userRunCards) {
+ const shouldCollapse = card.streamingReplyText != null
+ || (card.replyIndex != null && replyTextOverrides.has(card.replyIndex));
+ if (!shouldCollapse) continue;
+ const triggerMsg = messages[card.triggerIndex];
+ const runKey = triggerMsg?.id
+ ? `msg-${triggerMsg.id}`
+ : `${currentSessionKey}:trigger-${card.triggerIndex}`;
+ keys.add(runKey);
+ }
+ return keys;
+ }, [currentSessionKey, messages, replyTextOverrides, userRunCards]);
+
+ useEffect(() => {
+ if (userRunCards.length === 0) return;
+ const current = graphStepCacheStore.get(currentSessionKey) ?? {};
+ let changed = false;
+ const next = { ...current };
+ for (const card of userRunCards) {
+ if (card.steps.length === 0) continue;
+ const triggerMsg = messages[card.triggerIndex];
+ const runKey = triggerMsg?.id
+ ? `msg-${triggerMsg.id}`
+ : `${currentSessionKey}:trigger-${card.triggerIndex}`;
+ const existing = current[runKey];
+ const sameSteps = !!existing
+ && existing.steps.length === card.steps.length
+ && existing.steps.every((step, index) => {
+ const nextStep = card.steps[index];
+ return nextStep
+ && step.id === nextStep.id
+ && step.label === nextStep.label
+ && step.status === nextStep.status
+ && step.kind === nextStep.kind
+ && step.detail === nextStep.detail
+ && step.depth === nextStep.depth
+ && step.parentId === nextStep.parentId;
+ });
+ if (
+ sameSteps
+ && existing?.agentLabel === card.agentLabel
+ && existing?.sessionLabel === card.sessionLabel
+ && existing?.segmentEnd === card.segmentEnd
+ && existing?.replyIndex === card.replyIndex
+ && existing?.triggerIndex === card.triggerIndex
+ ) {
+ continue;
+ }
+ next[runKey] = {
+ steps: card.steps,
+ agentLabel: card.agentLabel,
+ sessionLabel: card.sessionLabel,
+ segmentEnd: card.segmentEnd,
+ replyIndex: card.replyIndex,
+ triggerIndex: card.triggerIndex,
+ };
+ changed = true;
+ }
+ if (changed) {
+ graphStepCacheStore.set(currentSessionKey, next);
+ }
+ }, [userRunCards, messages, currentSessionKey]);
return (
@@ -237,12 +438,19 @@ export function Chat() {
-
+
{isEmpty ? (
) : (
<>
{messages.map((msg, idx) => {
+ if (foldedNarrationIndices.has(idx)) return null;
const suppressToolCards = userRunCards.some((card) =>
idx > card.triggerIndex && idx <= card.segmentEnd,
);
@@ -255,40 +463,42 @@ export function Chat() {
>
{userRunCards
.filter((card) => card.triggerIndex === idx)
- .map((card) => (
- {
- document.getElementById(`chat-message-${card.triggerIndex}`)?.scrollIntoView({
- behavior: 'smooth',
- block: 'center',
- });
- }}
- onJumpToReply={() => {
- if (card.replyIndex == null) return;
- document.getElementById(`chat-message-${card.replyIndex}`)?.scrollIntoView({
- behavior: 'smooth',
- block: 'center',
- });
- }}
- />
- ))}
+ .map((card) => {
+ const triggerMsg = messages[card.triggerIndex];
+ const runKey = triggerMsg?.id
+ ? `msg-${triggerMsg.id}`
+ : `${currentSessionKey}:trigger-${card.triggerIndex}`;
+ const userOverride = graphExpandedOverrides[runKey];
+ const expanded = userOverride != null
+ ? userOverride
+ : autoCollapsedRunKeys.has(runKey)
+ ? false
+ : undefined;
+ return (
+
+ setGraphExpandedOverrides((prev) => ({ ...prev, [runKey]: next }))
+ }
+ />
+ );
+ })}
);
})}
{/* Streaming message */}
- {shouldRenderStreaming && (
+ {shouldRenderStreaming && !hasActiveExecutionGraph && (
)}
{/* Activity indicator: waiting for next AI turn after tool execution */}
- {sending && pendingFinal && !shouldRenderStreaming && (
+ {sending && pendingFinal && !shouldRenderStreaming && !hasActiveExecutionGraph && (
)}
{/* Typing indicator when sending but no stream content yet */}
- {sending && !pendingFinal && !hasAnyStreamContent && (
+ {sending && !pendingFinal && !hasAnyStreamContent && !hasActiveExecutionGraph && (
)}
>
diff --git a/src/pages/Chat/message-utils.ts b/src/pages/Chat/message-utils.ts
index 4a490bbbb..0a014435f 100644
--- a/src/pages/Chat/message-utils.ts
+++ b/src/pages/Chat/message-utils.ts
@@ -63,6 +63,52 @@ function compactProgressiveParts(parts: string[]): string[] {
return compacted;
}
+function splitProgressiveParts(parts: string[]): string[] {
+ const segments: string[] = [];
+ let previous = '';
+
+ for (const part of parts) {
+ const current = normalizeProgressiveText(part);
+ if (!current) continue;
+
+ if (!previous) {
+ segments.push(current);
+ previous = current;
+ continue;
+ }
+
+ if (current === previous || previous.startsWith(current)) {
+ continue;
+ }
+
+ if (current.startsWith(previous)) {
+ const incremental = current.slice(previous.length).trim();
+ if (incremental) {
+ segments.push(incremental);
+ }
+ previous = current;
+ continue;
+ }
+
+ segments.push(current);
+ previous = current;
+ }
+
+ return segments;
+}
+
+function escapeRegExp(value: string): string {
+ return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+}
+
+function consumeLeadingSegment(text: string, segment: string): number {
+ const tokens = segment.trim().split(/\s+/).filter(Boolean);
+ if (tokens.length === 0) return 0;
+ const pattern = new RegExp(`^\\s*${tokens.map(escapeRegExp).join('\\s+')}\\s*`, 'u');
+ const match = text.match(pattern);
+ return match ? match[0].length : 0;
+}
+
/**
* Extract displayable text from a message's content field.
* Handles both string content and array-of-blocks content.
@@ -102,6 +148,37 @@ export function extractText(message: RawMessage | unknown): string {
return result;
}
+export function extractTextSegments(message: RawMessage | unknown): string[] {
+ if (!message || typeof message !== 'object') return [];
+ const msg = message as Record
;
+ const content = msg.content;
+ const isUser = msg.role === 'user';
+
+ let segments: string[] = [];
+
+ if (typeof content === 'string') {
+ const cleaned = content.trim();
+ segments = cleaned ? [cleaned] : [];
+ } else if (Array.isArray(content)) {
+ const parts: string[] = [];
+ for (const block of content as ContentBlock[]) {
+ if (block.type === 'text' && block.text && block.text.trim()) {
+ parts.push(block.text);
+ }
+ }
+ segments = splitProgressiveParts(parts);
+ } else if (typeof msg.text === 'string') {
+ const cleaned = msg.text.trim();
+ segments = cleaned ? [cleaned] : [];
+ }
+
+ if (!isUser) return segments;
+
+ return segments
+ .map((segment) => cleanUserText(segment))
+ .filter((segment) => segment.length > 0);
+}
+
/**
* Extract thinking/reasoning content from a message.
* Returns null if no thinking content found.
@@ -127,6 +204,43 @@ export function extractThinking(message: RawMessage | unknown): string | null {
return combined.length > 0 ? combined : null;
}
+export function extractThinkingSegments(message: RawMessage | unknown): string[] {
+ if (!message || typeof message !== 'object') return [];
+ const msg = message as Record;
+ const content = msg.content;
+
+ if (!Array.isArray(content)) return [];
+
+ const parts: string[] = [];
+ for (const block of content as ContentBlock[]) {
+ if (block.type === 'thinking' && block.thinking) {
+ const cleaned = block.thinking.trim();
+ if (cleaned) {
+ parts.push(cleaned);
+ }
+ }
+ }
+
+ return splitProgressiveParts(parts);
+}
+
+export function stripProcessMessagePrefix(text: string, processSegments: string[]): string {
+ let remaining = text;
+ let strippedAny = false;
+
+ for (const segment of processSegments) {
+ const normalizedSegment = normalizeProgressiveText(segment);
+ if (!normalizedSegment) continue;
+ const consumed = consumeLeadingSegment(remaining, normalizedSegment);
+ if (consumed === 0) break;
+ remaining = remaining.slice(consumed);
+ strippedAny = true;
+ }
+
+ const trimmed = remaining.trimStart();
+ return strippedAny && trimmed ? trimmed : text;
+}
+
/**
* Extract media file references from Gateway-formatted user message text.
* Returns array of { filePath, mimeType } from [media attached: path (mime) | path] patterns.
diff --git a/src/pages/Chat/task-visualization.ts b/src/pages/Chat/task-visualization.ts
index 248b8b0b0..d9bdaef36 100644
--- a/src/pages/Chat/task-visualization.ts
+++ b/src/pages/Chat/task-visualization.ts
@@ -1,4 +1,4 @@
-import { extractThinking, extractToolUse } from './message-utils';
+import { extractText, extractTextSegments, extractThinkingSegments, extractToolUse } from './message-utils';
import type { RawMessage, ToolStatus } from '@/stores/chat';
export type TaskStepStatus = 'running' | 'completed' | 'error';
@@ -7,21 +7,45 @@ export interface TaskStep {
id: string;
label: string;
status: TaskStepStatus;
- kind: 'thinking' | 'tool' | 'system';
+ kind: 'thinking' | 'tool' | 'system' | 'message';
detail?: string;
depth: number;
parentId?: string;
}
-const MAX_TASK_STEPS = 8;
+/**
+ * Detects the index of the "final reply" assistant message in a run segment.
+ *
+ * The reply is the last assistant message that carries non-empty text
+ * content, regardless of whether it ALSO carries tool calls. (Mixed
+ * `text + toolCall` replies are rare but real — the model can emit a parting
+ * text block alongside a final tool call. Treating such a message as the
+ * reply avoids mis-protecting an earlier narration as the "answer" and
+ * leaking the actual last text into the fold.)
+ *
+ * When this returns a non-negative index, the caller should avoid folding
+ * that message's text into the graph (it is the answer the user sees in the
+ * chat stream). When the run is still active (streaming) the final reply is
+ * produced via `streamingMessage` instead, so callers pass
+ * `hasStreamingReply = true` to skip protection and let every assistant-with-
+ * text message in history be folded into the graph as narration.
+ */
+export function findReplyMessageIndex(messages: RawMessage[], hasStreamingReply: boolean): number {
+ if (hasStreamingReply) return -1;
+ for (let idx = messages.length - 1; idx >= 0; idx -= 1) {
+ const message = messages[idx];
+ if (!message || message.role !== 'assistant') continue;
+ if (extractText(message).trim().length === 0) continue;
+ return idx;
+ }
+ return -1;
+}
interface DeriveTaskStepsInput {
messages: RawMessage[];
streamingMessage: unknown | null;
streamingTools: ToolStatus[];
- sending: boolean;
- pendingFinal: boolean;
- showThinking: boolean;
+ omitLastStreamingMessageSegment?: boolean;
}
export interface SubagentCompletionInfo {
@@ -128,7 +152,7 @@ function attachTopology(steps: TaskStep[]): TaskStep[] {
continue;
}
- if (step.kind === 'thinking') {
+ if (step.kind === 'thinking' || step.kind === 'message') {
withTopology.push({
...step,
depth: activeBranchNodeId ? 3 : 1,
@@ -157,13 +181,37 @@ function attachTopology(steps: TaskStep[]): TaskStep[] {
return withTopology;
}
+function appendDetailSegments(
+ segments: string[],
+ options: {
+ idPrefix: string;
+ label: string;
+ kind: Extract<TaskStep['kind'], 'thinking' | 'message'>;
+ running: boolean;
+ upsertStep: (step: TaskStep) => void;
+ },
+): void {
+ const normalizedSegments = segments
+ .map((segment) => normalizeText(segment))
+ .filter((segment): segment is string => !!segment);
+
+ normalizedSegments.forEach((detail, index) => {
+ options.upsertStep({
+ id: `${options.idPrefix}-${index}`,
+ label: options.label,
+ status: options.running && index === normalizedSegments.length - 1 ? 'running' : 'completed',
+ kind: options.kind,
+ detail,
+ depth: 1,
+ });
+ });
+}
+
export function deriveTaskSteps({
messages,
streamingMessage,
streamingTools,
- sending,
- pendingFinal,
- showThinking,
+ omitLastStreamingMessageSegment = false,
}: DeriveTaskStepsInput): TaskStep[] {
const steps: TaskStep[] = [];
const stepIndexById = new Map<string, number>();
@@ -187,30 +235,44 @@ export function deriveTaskSteps({
? streamingMessage as RawMessage
: null;
- const relevantAssistantMessages = messages.filter((message) => {
- if (!message || message.role !== 'assistant') return false;
- if (extractToolUse(message).length > 0) return true;
- return showThinking && !!extractThinking(message);
- });
+ // The final answer the user sees as a chat bubble. We avoid folding it into
+ // the graph to prevent duplication. When a run is still streaming, the
+ // reply lives in `streamingMessage`, so every pure-text assistant message in
+ // `messages` is treated as intermediate narration.
+ const replyIndex = findReplyMessageIndex(messages, streamMessage != null);
- for (const [messageIndex, assistantMessage] of relevantAssistantMessages.entries()) {
- if (showThinking) {
- const thinking = extractThinking(assistantMessage);
- if (thinking) {
- upsertStep({
- id: `history-thinking-${assistantMessage.id || messageIndex}`,
- label: 'Thinking',
- status: 'completed',
- kind: 'thinking',
- detail: normalizeText(thinking),
- depth: 1,
- });
- }
- }
+ for (const [messageIndex, message] of messages.entries()) {
+ if (!message || message.role !== 'assistant') continue;
- extractToolUse(assistantMessage).forEach((tool, index) => {
+ appendDetailSegments(extractThinkingSegments(message), {
+ idPrefix: `history-thinking-${message.id || messageIndex}`,
+ label: 'Thinking',
+ kind: 'thinking',
+ running: false,
+ upsertStep,
+ });
+
+ const toolUses = extractToolUse(message);
+ // Fold any intermediate assistant text into the graph as a narration
+ // step — including text that lives on a mixed `text + toolCall` message.
+ // The narration step is emitted BEFORE the tool steps so the graph
+ // preserves the original ordering (the assistant "thinks out loud" and
+ // then invokes the tool).
+ const narrationSegments = extractTextSegments(message);
+ const graphNarrationSegments = messageIndex === replyIndex
+ ? narrationSegments.slice(0, -1)
+ : narrationSegments;
+ appendDetailSegments(graphNarrationSegments, {
+ idPrefix: `history-message-${message.id || messageIndex}`,
+ label: 'Message',
+ kind: 'message',
+ running: false,
+ upsertStep,
+ });
+
+ toolUses.forEach((tool, index) => {
upsertStep({
- id: tool.id || makeToolId(`history-tool-${assistantMessage.id || messageIndex}`, tool.name, index),
+ id: tool.id || makeToolId(`history-tool-${message.id || messageIndex}`, tool.name, index),
label: tool.name,
status: 'completed',
kind: 'tool',
@@ -220,18 +282,29 @@ export function deriveTaskSteps({
});
}
- if (streamMessage && showThinking) {
- const thinking = extractThinking(streamMessage);
- if (thinking) {
- upsertStep({
- id: 'stream-thinking',
- label: 'Thinking',
- status: 'running',
- kind: 'thinking',
- detail: normalizeText(thinking),
- depth: 1,
- });
- }
+ if (streamMessage) {
+ appendDetailSegments(extractThinkingSegments(streamMessage), {
+ idPrefix: 'stream-thinking',
+ label: 'Thinking',
+ kind: 'thinking',
+ running: true,
+ upsertStep,
+ });
+
+ // Stream-time narration should also appear in the execution graph so that
+ // intermediate process output stays in P1 instead of leaking into the
+ // assistant reply area.
+ const streamNarrationSegments = extractTextSegments(streamMessage);
+ const graphStreamNarrationSegments = omitLastStreamingMessageSegment
+ ? streamNarrationSegments.slice(0, -1)
+ : streamNarrationSegments;
+ appendDetailSegments(graphStreamNarrationSegments, {
+ idPrefix: 'stream-message',
+ label: 'Message',
+ kind: 'message',
+ running: !omitLastStreamingMessageSegment,
+ upsertStep,
+ });
}
const activeToolIds = new Set<string>();
@@ -267,28 +340,5 @@ export function deriveTaskSteps({
});
}
- if (sending && pendingFinal) {
- upsertStep({
- id: 'system-finalizing',
- label: 'Finalizing answer',
- status: 'running',
- kind: 'system',
- detail: 'Waiting for the assistant to finish this run.',
- depth: 1,
- });
- } else if (sending && steps.length === 0) {
- upsertStep({
- id: 'system-preparing',
- label: 'Preparing run',
- status: 'running',
- kind: 'system',
- detail: 'Waiting for the first streaming update.',
- depth: 1,
- });
- }
-
- const withTopology = attachTopology(steps);
- return withTopology.length > MAX_TASK_STEPS
- ? withTopology.slice(-MAX_TASK_STEPS)
- : withTopology;
+ return attachTopology(steps);
}
diff --git a/src/stores/chat.ts b/src/stores/chat.ts
index 4d6c2f7b3..c8d4ffb93 100644
--- a/src/stores/chat.ts
+++ b/src/stores/chat.ts
@@ -1,6 +1,6 @@
/**
* Chat State Store
- * Manages chat messages, sessions, streaming, and thinking state.
+ * Manages chat messages, sessions, and streaming state.
* Communicates with OpenClaw Gateway via renderer WebSocket RPC.
*/
import { create } from 'zustand';
@@ -93,6 +93,13 @@ function buildChatEventDedupeKey(eventState: string, event: Record {
- for (const part of compactProgressiveTextParts(textBuffer)) {
- normalized.push({ type: 'text', text: part });
- }
- textBuffer = [];
- };
-
- const flushThinkingBuffer = () => {
- for (const part of compactProgressiveTextParts(thinkingBuffer)) {
- normalized.push({ type: 'thinking', thinking: part });
- }
- thinkingBuffer = [];
- };
-
- for (const block of content) {
- if (block.type === 'text' && block.text) {
- textBuffer.push(block.text);
- continue;
- }
-
- if (block.type === 'thinking' && block.thinking) {
- thinkingBuffer.push(block.thinking);
- continue;
- }
-
- flushTextBuffer();
- flushThinkingBuffer();
- normalized.push(block);
- }
-
- flushTextBuffer();
- flushThinkingBuffer();
-
- return normalized;
+ return content.map((block) => ({ ...block }));
}
function normalizeStreamingMessage(message: unknown): unknown {
@@ -1199,7 +1168,6 @@ export const useChatStore = create((set, get) => ({
sessionLabels: {},
sessionLastActivity: {},
- showThinking: true,
thinkingLevel: null,
// ── Load sessions via sessions.list ──
@@ -2269,10 +2237,6 @@ export const useChatStore = create((set, get) => ({
}
},
- // ── Toggle thinking visibility ──
-
- toggleThinking: () => set((s) => ({ showThinking: !s.showThinking })),
-
// ── Refresh: reload history + sessions ──
refresh: async () => {
diff --git a/src/stores/chat/helpers.ts b/src/stores/chat/helpers.ts
index 2f9cab017..b79454831 100644
--- a/src/stores/chat/helpers.ts
+++ b/src/stores/chat/helpers.ts
@@ -109,44 +109,7 @@ function compactProgressiveTextParts(parts: string[]): string[] {
}
function normalizeLiveContentBlocks(content: ContentBlock[]): ContentBlock[] {
- const normalized: ContentBlock[] = [];
- let textBuffer: string[] = [];
- let thinkingBuffer: string[] = [];
-
- const flushTextBuffer = () => {
- for (const part of compactProgressiveTextParts(textBuffer)) {
- normalized.push({ type: 'text', text: part });
- }
- textBuffer = [];
- };
-
- const flushThinkingBuffer = () => {
- for (const part of compactProgressiveTextParts(thinkingBuffer)) {
- normalized.push({ type: 'thinking', thinking: part });
- }
- thinkingBuffer = [];
- };
-
- for (const block of content) {
- if (block.type === 'text' && block.text) {
- textBuffer.push(block.text);
- continue;
- }
-
- if (block.type === 'thinking' && block.thinking) {
- thinkingBuffer.push(block.thinking);
- continue;
- }
-
- flushTextBuffer();
- flushThinkingBuffer();
- normalized.push(block);
- }
-
- flushTextBuffer();
- flushThinkingBuffer();
-
- return normalized;
+ return content.map((block) => ({ ...block }));
}
function normalizeStreamingMessage(message: unknown): unknown {
diff --git a/src/stores/chat/internal.ts b/src/stores/chat/internal.ts
index 2ba5c0c7c..509204f4f 100644
--- a/src/stores/chat/internal.ts
+++ b/src/stores/chat/internal.ts
@@ -21,7 +21,6 @@ export const initialChatState: Pick<
| 'currentAgentId'
| 'sessionLabels'
| 'sessionLastActivity'
- | 'showThinking'
| 'thinkingLevel'
> = {
messages: [],
@@ -43,7 +42,6 @@ export const initialChatState: Pick<
sessionLabels: {},
sessionLastActivity: {},
- showThinking: true,
thinkingLevel: null,
};
@@ -61,7 +59,6 @@ export function createChatActions(
| 'sendMessage'
| 'abortRun'
| 'handleChatEvent'
- | 'toggleThinking'
| 'refresh'
| 'clearError'
> {
diff --git a/src/stores/chat/runtime-ui-actions.ts b/src/stores/chat/runtime-ui-actions.ts
index 792b7a67e..7dbad22f0 100644
--- a/src/stores/chat/runtime-ui-actions.ts
+++ b/src/stores/chat/runtime-ui-actions.ts
@@ -1,9 +1,7 @@
import type { ChatGet, ChatSet, RuntimeActions } from './store-api';
-export function createRuntimeUiActions(set: ChatSet, get: ChatGet): Pick {
+export function createRuntimeUiActions(set: ChatSet, get: ChatGet): Pick {
return {
- toggleThinking: () => set((s) => ({ showThinking: !s.showThinking })),
-
// ── Refresh: reload history + sessions ──
refresh: async () => {
diff --git a/src/stores/chat/store-api.ts b/src/stores/chat/store-api.ts
index 8451b6882..3d716d56b 100644
--- a/src/stores/chat/store-api.ts
+++ b/src/stores/chat/store-api.ts
@@ -14,5 +14,5 @@ export type SessionHistoryActions = Pick<
export type RuntimeActions = Pick<
ChatState,
- 'sendMessage' | 'abortRun' | 'handleChatEvent' | 'toggleThinking' | 'refresh' | 'clearError'
+ 'sendMessage' | 'abortRun' | 'handleChatEvent' | 'refresh' | 'clearError'
>;
diff --git a/src/stores/chat/types.ts b/src/stores/chat/types.ts
index 8d64920a4..e5f7e6732 100644
--- a/src/stores/chat/types.ts
+++ b/src/stores/chat/types.ts
@@ -85,7 +85,6 @@ export interface ChatState {
sessionLastActivity: Record;
// Thinking
- showThinking: boolean;
thinkingLevel: string | null;
// Actions
@@ -108,7 +107,6 @@ export interface ChatState {
) => Promise<void>;
abortRun: () => Promise<void>;
handleChatEvent: (event: Record<string, unknown>) => void;
- toggleThinking: () => void;
refresh: () => Promise<void>;
clearError: () => void;
}
diff --git a/tests/e2e/chat-task-visualizer.spec.ts b/tests/e2e/chat-task-visualizer.spec.ts
index d3d97667d..3399292e7 100644
--- a/tests/e2e/chat-task-visualizer.spec.ts
+++ b/tests/e2e/chat-task-visualizer.spec.ts
@@ -148,6 +148,38 @@ const childTranscriptMessages = [
},
];
+const inFlightPrompt = 'Open browser, search for tech news, and take a screenshot';
+const seededInFlightHistory = [
+ {
+ role: 'user',
+ content: [{ type: 'text', text: inFlightPrompt }],
+ timestamp: Date.now(),
+ },
+];
+const longRunPrompt = 'Inspect the workspace and summarize the result';
+const longRunProcessSegments = Array.from({ length: 9 }, (_, index) => `Checked source ${index + 1}.`);
+const longRunSummary = 'Here is the summary.';
+const longRunReplyText = `${longRunProcessSegments.join(' ')} ${longRunSummary}`;
+const longRunHistory = [
+ {
+ role: 'user',
+ content: [{ type: 'text', text: longRunPrompt }],
+ timestamp: Date.now(),
+ },
+ ...longRunProcessSegments.map((segment, index) => ({
+ role: 'assistant',
+ id: `long-run-step-${index + 1}`,
+ content: [{ type: 'text', text: segment }],
+ timestamp: Date.now(),
+ })),
+ {
+ role: 'assistant',
+ id: 'long-run-final',
+ content: [{ type: 'text', text: longRunReplyText }],
+ timestamp: Date.now(),
+ },
+];
+
test.describe('ClawX chat execution graph', () => {
test('renders internal yield status and linked subagent branch from mocked IPC', async ({ launchElectronApp }) => {
const app = await launchElectronApp({ skipSetup: true });
@@ -222,6 +254,12 @@ test.describe('ClawX chat execution graph', () => {
}
await expect(page.getByTestId('main-layout')).toBeVisible();
await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 });
+ // Completed runs auto-collapse into a single-line summary button. Expand
+ // it first so the underlying step details are rendered.
+ const graph = page.getByTestId('chat-execution-graph');
+ if ((await graph.getAttribute('data-collapsed')) === 'true') {
+ await graph.click();
+ }
await expect(
page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('sessions_yield', { exact: true }),
).toBeVisible();
@@ -229,6 +267,9 @@ test.describe('ClawX chat execution graph', () => {
await expect(
page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('exec', { exact: true }),
).toBeVisible();
+ const execRow = page.locator('[data-testid="chat-execution-step"]').filter({ hasText: 'exec' }).first();
+ await execRow.click();
+ await expect(execRow.locator('pre')).toBeVisible();
await expect(page.locator('[data-testid="chat-execution-graph"]').getByText('I asked coder to break down the core blocks of ~/Velaria uncommitted changes; will give you the conclusion when it returns.')).toBeVisible();
await expect(page.getByText('CHECKLIST.md')).toHaveCount(0);
} finally {
@@ -252,7 +293,7 @@ test.describe('ClawX chat execution graph', () => {
[stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 200 }])]: {
success: true,
result: {
- messages: [],
+ messages: seededInFlightHistory,
},
},
},
@@ -281,9 +322,16 @@ test.describe('ClawX chat execution graph', () => {
await app.evaluate(async ({ app: _app }) => {
const { ipcMain } = process.mainModule!.require('electron') as typeof import('electron');
- const sendPayloads: Array<{ message?: string; sessionKey?: string }> = [];
+ (globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory = [
+ {
+ role: 'user',
+ content: [{ type: 'text', text: 'Open browser, search for tech news, and take a screenshot' }],
+ timestamp: Date.now(),
+ },
+ ];
ipcMain.removeHandler('gateway:rpc');
ipcMain.handle('gateway:rpc', async (_event: unknown, method: string, payload: unknown) => {
+ void payload;
if (method === 'sessions.list') {
return {
success: true,
@@ -295,22 +343,16 @@ test.describe('ClawX chat execution graph', () => {
if (method === 'chat.history') {
return {
success: true,
- result: { messages: [] },
- };
- }
- if (method === 'chat.send') {
- if (payload && typeof payload === 'object') {
- const p = payload as { message?: string; sessionKey?: string };
- sendPayloads.push({ message: p.message, sessionKey: p.sessionKey });
- }
- return {
- success: true,
- result: { runId: 'mock-run' },
+ result: {
+ messages: (
+ (globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory
+ ?? seededInFlightHistory
+ ),
+ },
};
}
return { success: true, result: {} };
});
- (globalThis as typeof globalThis & { __clawxSendPayloads?: Array<{ message?: string; sessionKey?: string }> }).__clawxSendPayloads = sendPayloads;
});
const page = await getStableWindow(app);
@@ -323,18 +365,24 @@ test.describe('ClawX chat execution graph', () => {
}
await expect(page.getByTestId('main-layout')).toBeVisible();
- await page.getByTestId('chat-composer-input').fill('Open browser, search for tech news, and take a screenshot');
- await page.getByTestId('chat-composer-send').click();
+ await expect(page.getByText(inFlightPrompt)).toHaveCount(1);
- await expect(page.getByText('Open browser, search for tech news, and take a screenshot')).toHaveCount(1);
- await expect.poll(async () => {
- return await app.evaluate(() => {
- const sendPayloads = (globalThis as typeof globalThis & {
- __clawxSendPayloads?: Array<{ message?: string; sessionKey?: string }>;
- }).__clawxSendPayloads || [];
- return sendPayloads.length;
+ await app.evaluate(async ({ BrowserWindow }) => {
+ const win = BrowserWindow.getAllWindows()[0];
+ win?.webContents.send('gateway:notification', {
+ method: 'agent',
+ params: {
+ runId: 'mock-run',
+ sessionKey: 'agent:main:main',
+ state: 'started',
+ },
});
- }).toBe(1);
+ });
+
+ await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'false');
+ await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"]')).toBeVisible();
+ await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"] [aria-hidden="true"]')).toHaveCount(1);
+ await expect(page.locator('[data-testid^="chat-message-"]')).toHaveCount(1);
await app.evaluate(async ({ BrowserWindow }) => {
const win = BrowserWindow.getAllWindows()[0];
@@ -359,15 +407,143 @@ test.describe('ClawX chat execution graph', () => {
});
});
- await expect(page.getByText('Open browser, search for tech news, and take a screenshot')).toHaveCount(1);
- await expect(page.getByText(/^thinking 1 2 3$/)).toHaveCount(1);
- await expect(page.getByText(/^thinking 1 2$/)).toHaveCount(0);
- await expect(page.getByText(/^thinking 1$/)).toHaveCount(0);
- await expect(page.getByText(/^1 2 3$/)).toHaveCount(1);
- await expect(page.getByText(/^1 2$/)).toHaveCount(0);
- await expect(page.getByText(/^1$/)).toHaveCount(0);
+ await expect(page.getByText(inFlightPrompt)).toHaveCount(1);
+ // Intermediate process output should be rendered in the execution graph
+ // only, not as a streaming assistant chat bubble.
+ await expect(page.locator('[data-testid^="chat-message-"]')).toHaveCount(1);
+ await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'false');
+ await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"]')).toBeVisible();
+ await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"] [aria-hidden="true"]')).toHaveCount(1);
+ await expect(page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('Thinking', { exact: true })).toHaveCount(3);
+ const firstChatBubble = page.locator('[data-testid^="chat-message-"] > div').first();
+ await expect(firstChatBubble.getByText(/^1 2 3$/)).toHaveCount(0);
+
+ await app.evaluate(async ({ BrowserWindow }) => {
+ (globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory = [
+ {
+ role: 'user',
+ content: [{ type: 'text', text: 'Open browser, search for tech news, and take a screenshot' }],
+ timestamp: Date.now(),
+ },
+ {
+ role: 'assistant',
+ content: [{
+ type: 'toolCall',
+ id: 'browser-start-call',
+ name: 'browser',
+ arguments: { action: 'start' },
+ }],
+ timestamp: Date.now(),
+ },
+ {
+ role: 'assistant',
+ content: [{
+ type: 'toolCall',
+ id: 'browser-open-call',
+ name: 'browser',
+ arguments: { action: 'open', targetUrl: 'https://x.com/home' },
+ }],
+ timestamp: Date.now(),
+ },
+ {
+ role: 'assistant',
+ id: 'final-response',
+ content: [{ type: 'text', text: 'Done.' }],
+ timestamp: Date.now(),
+ },
+ ];
+ const win = BrowserWindow.getAllWindows()[0];
+ win?.webContents.send('gateway:notification', {
+ method: 'agent',
+ params: {
+ runId: 'mock-run',
+ sessionKey: 'agent:main:main',
+ state: 'final',
+ message: {
+ role: 'assistant',
+ id: 'final-response',
+ content: [{ type: 'text', text: 'Done.' }],
+ timestamp: Date.now(),
+ },
+ },
+ });
+ });
+
+ await expect(page.getByText('Done.')).toBeVisible();
+ await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'true');
} finally {
await closeElectronApp(app);
}
});
+
+ test('preserves long execution history counts and strips the full folded reply prefix', async ({ launchElectronApp }) => {
+ const app = await launchElectronApp({ skipSetup: true });
+
+ try {
+ await installIpcMocks(app, {
+ gatewayStatus: { state: 'running', port: 18789, pid: 12345 },
+ gatewayRpc: {
+ [stableStringify(['sessions.list', {}])]: {
+ success: true,
+ result: {
+ sessions: [{ key: PROJECT_MANAGER_SESSION_KEY, displayName: 'main' }],
+ },
+ },
+ [stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 200 }])]: {
+ success: true,
+ result: {
+ messages: longRunHistory,
+ },
+ },
+ [stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 1000 }])]: {
+ success: true,
+ result: {
+ messages: longRunHistory,
+ },
+ },
+ },
+ hostApi: {
+ [stableStringify(['/api/gateway/status', 'GET'])]: {
+ ok: true,
+ data: {
+ status: 200,
+ ok: true,
+ json: { state: 'running', port: 18789, pid: 12345 },
+ },
+ },
+ [stableStringify(['/api/agents', 'GET'])]: {
+ ok: true,
+ data: {
+ status: 200,
+ ok: true,
+ json: {
+ success: true,
+ agents: [{ id: 'main', name: 'main' }],
+ },
+ },
+ },
+ },
+ });
+
+ const page = await getStableWindow(app);
+ try {
+ await page.reload();
+ } catch (error) {
+ if (!String(error).includes('ERR_FILE_NOT_FOUND')) {
+ throw error;
+ }
+ }
+
+ await expect(page.getByTestId('main-layout')).toBeVisible();
+ await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 });
+ await expect(page.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'true');
+ await expect(page.getByTestId('chat-execution-graph')).toContainText('0 tool calls');
+ await expect(page.getByTestId('chat-execution-graph')).toContainText('9 process messages');
+ await expect(page.getByText(longRunSummary, { exact: true })).toBeVisible();
+ await expect(page.getByText(longRunReplyText, { exact: true })).toHaveCount(0);
+ } finally {
+ await closeElectronApp(app);
+ }
+ });
+
});
diff --git a/tests/unit/chat-event-dedupe.test.ts b/tests/unit/chat-event-dedupe.test.ts
new file mode 100644
index 000000000..a922ab722
--- /dev/null
+++ b/tests/unit/chat-event-dedupe.test.ts
@@ -0,0 +1,140 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { extractText } from '@/pages/Chat/message-utils';
+
+const { gatewayRpcMock, hostApiFetchMock, agentsState } = vi.hoisted(() => ({
+ gatewayRpcMock: vi.fn(),
+ hostApiFetchMock: vi.fn(),
+ agentsState: {
+ agents: [] as Array<Record<string, unknown>>,
+ },
+}));
+
+vi.mock('@/stores/gateway', () => ({
+ useGatewayStore: {
+ getState: () => ({
+ status: { state: 'running', port: 18789 },
+ rpc: gatewayRpcMock,
+ }),
+ },
+}));
+
+vi.mock('@/stores/agents', () => ({
+ useAgentsStore: {
+ getState: () => agentsState,
+ },
+}));
+
+vi.mock('@/lib/host-api', () => ({
+ hostApiFetch: (...args: unknown[]) => hostApiFetchMock(...args),
+}));
+
+describe('chat event dedupe', () => {
+ beforeEach(() => {
+ vi.resetModules();
+ window.localStorage.clear();
+ gatewayRpcMock.mockReset();
+ hostApiFetchMock.mockReset();
+ agentsState.agents = [];
+ });
+
+ it('keeps processing delta events without seq for the same run', async () => {
+ const { useChatStore } = await import('@/stores/chat');
+
+ useChatStore.setState({
+ currentSessionKey: 'agent:main:main',
+ currentAgentId: 'main',
+ sessions: [{ key: 'agent:main:main' }],
+ messages: [],
+ sessionLabels: {},
+ sessionLastActivity: {},
+ sending: false,
+ activeRunId: null,
+ streamingText: '',
+ streamingMessage: null,
+ streamingTools: [],
+ pendingFinal: true,
+ lastUserMessageAt: null,
+ pendingToolImages: [],
+ error: null,
+ loading: false,
+ thinkingLevel: null,
+ });
+
+ useChatStore.getState().handleChatEvent({
+ state: 'delta',
+ runId: 'run-no-seq',
+ sessionKey: 'agent:main:main',
+ message: {
+ role: 'assistant',
+ id: 'reply-stream',
+ content: [{ type: 'text', text: 'Checked X.' }],
+ },
+ });
+
+ useChatStore.getState().handleChatEvent({
+ state: 'delta',
+ runId: 'run-no-seq',
+ sessionKey: 'agent:main:main',
+ message: {
+ role: 'assistant',
+ id: 'reply-stream',
+ content: [
+ { type: 'text', text: 'Checked X.' },
+ { type: 'text', text: 'Checked X. Here is the summary.' },
+ ],
+ },
+ });
+
+ expect(extractText(useChatStore.getState().streamingMessage)).toBe('Checked X. Here is the summary.');
+ });
+
+ it('still dedupes repeated delta events when seq matches', async () => {
+ const { useChatStore } = await import('@/stores/chat');
+
+ useChatStore.setState({
+ currentSessionKey: 'agent:main:main',
+ currentAgentId: 'main',
+ sessions: [{ key: 'agent:main:main' }],
+ messages: [],
+ sessionLabels: {},
+ sessionLastActivity: {},
+ sending: false,
+ activeRunId: null,
+ streamingText: '',
+ streamingMessage: null,
+ streamingTools: [],
+ pendingFinal: false,
+ lastUserMessageAt: null,
+ pendingToolImages: [],
+ error: null,
+ loading: false,
+ thinkingLevel: null,
+ });
+
+ useChatStore.getState().handleChatEvent({
+ state: 'delta',
+ runId: 'run-with-seq',
+ sessionKey: 'agent:main:main',
+ seq: 3,
+ message: {
+ role: 'assistant',
+ id: 'reply-stream',
+ content: [{ type: 'text', text: 'first version' }],
+ },
+ });
+
+ useChatStore.getState().handleChatEvent({
+ state: 'delta',
+ runId: 'run-with-seq',
+ sessionKey: 'agent:main:main',
+ seq: 3,
+ message: {
+ role: 'assistant',
+ id: 'reply-stream',
+ content: [{ type: 'text', text: 'duplicate version should be ignored' }],
+ },
+ });
+
+ expect(extractText(useChatStore.getState().streamingMessage)).toBe('first version');
+ });
+});
diff --git a/tests/unit/chat-message.test.tsx b/tests/unit/chat-message.test.tsx
index d14edee78..128870548 100644
--- a/tests/unit/chat-message.test.tsx
+++ b/tests/unit/chat-message.test.tsx
@@ -23,7 +23,6 @@ describe('ChatMessage attachment dedupe', () => {
render(
,
);
diff --git a/tests/unit/chat-page-execution-graph.test.tsx b/tests/unit/chat-page-execution-graph.test.tsx
new file mode 100644
index 000000000..d76d6049c
--- /dev/null
+++ b/tests/unit/chat-page-execution-graph.test.tsx
@@ -0,0 +1,180 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { render, screen, waitFor } from '@testing-library/react';
+
+const hostApiFetchMock = vi.fn();
+
+const { gatewayState, agentsState } = vi.hoisted(() => ({
+ gatewayState: {
+ status: { state: 'running', port: 18789 },
+ },
+ agentsState: {
+ agents: [{ id: 'main', name: 'main' }] as Array<Record<string, unknown>>,
+ fetchAgents: vi.fn(),
+ },
+}));
+
+vi.mock('@/stores/gateway', () => ({
+ useGatewayStore: (selector: (state: typeof gatewayState) => unknown) => selector(gatewayState),
+}));
+
+vi.mock('@/stores/agents', () => ({
+ useAgentsStore: (selector: (state: typeof agentsState) => unknown) => selector(agentsState),
+}));
+
+vi.mock('@/lib/host-api', () => ({
+ hostApiFetch: (...args: unknown[]) => hostApiFetchMock(...args),
+}));
+
+vi.mock('react-i18next', () => ({
+ useTranslation: () => ({
+ t: (key: string, params?: Record<string, unknown>) => {
+ if (key === 'executionGraph.collapsedSummary') {
+ return `collapsed ${String(params?.toolCount ?? '')} ${String(params?.processCount ?? '')}`.trim();
+ }
+ if (key === 'executionGraph.agentRun') {
+ return `Main execution`;
+ }
+ if (key === 'executionGraph.title') {
+ return 'Execution Graph';
+ }
+ if (key === 'executionGraph.collapseAction') {
+ return 'Collapse';
+ }
+ if (key === 'executionGraph.thinkingLabel') {
+ return 'Thinking';
+ }
+ if (key.startsWith('taskPanel.stepStatus.')) {
+ return key.split('.').at(-1) ?? key;
+ }
+ return key;
+ },
+ }),
+}));
+
+vi.mock('@/hooks/use-stick-to-bottom-instant', () => ({
+ useStickToBottomInstant: () => ({
+ contentRef: { current: null },
+ scrollRef: { current: null },
+ }),
+}));
+
+vi.mock('@/hooks/use-min-loading', () => ({
+ useMinLoading: () => false,
+}));
+
+vi.mock('@/pages/Chat/ChatToolbar', () => ({
+ ChatToolbar: () => null,
+}));
+
+vi.mock('@/pages/Chat/ChatInput', () => ({
+ ChatInput: () => null,
+}));
+
+describe('Chat execution graph lifecycle', () => {
+ beforeEach(async () => {
+ vi.resetModules();
+ hostApiFetchMock.mockReset();
+ hostApiFetchMock.mockResolvedValue({ success: true, messages: [] });
+ agentsState.fetchAgents.mockReset();
+
+ const { useChatStore } = await import('@/stores/chat');
+ useChatStore.setState({
+ messages: [
+ {
+ role: 'user',
+ content: 'Check semiconductor chatter',
+ },
+ {
+ role: 'assistant',
+ id: 'tool-turn',
+ content: [
+ { type: 'text', text: 'Checked X.' },
+ { type: 'tool_use', id: 'browser-search', name: 'browser', input: { action: 'search', query: 'semiconductor' } },
+ ],
+ },
+ ],
+ loading: false,
+ error: null,
+ sending: true,
+ activeRunId: 'run-live',
+ streamingText: '',
+ streamingMessage: {
+ role: 'assistant',
+ id: 'final-stream',
+ content: [
+ { type: 'text', text: 'Checked X.' },
+ { type: 'text', text: 'Checked X. Here is the summary.' },
+ ],
+ },
+ streamingTools: [
+ {
+ toolCallId: 'browser-search',
+ name: 'browser',
+ status: 'completed',
+ updatedAt: Date.now(),
+ },
+ ],
+ pendingFinal: true,
+ lastUserMessageAt: Date.now(),
+ pendingToolImages: [],
+ sessions: [{ key: 'agent:main:main' }],
+ currentSessionKey: 'agent:main:main',
+ currentAgentId: 'main',
+ sessionLabels: {},
+ sessionLastActivity: {},
+ thinkingLevel: null,
+ });
+ });
+
+ it('collapses execution once the reply starts streaming and keeps only the reply suffix in the bubble', async () => {
+ const { Chat } = await import('@/pages/Chat/index');
+
+ render();
+
+ await waitFor(() => {
+ expect(screen.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'true');
+ });
+
+ expect(screen.getByText('Here is the summary.')).toBeInTheDocument();
+ expect(screen.queryByText('Checked X. Here is the summary.')).not.toBeInTheDocument();
+ });
+
+ it('renders the execution graph immediately for an active run before any stream content arrives', async () => {
+ const { useChatStore } = await import('@/stores/chat');
+ useChatStore.setState({
+ messages: [
+ {
+ role: 'user',
+ content: 'Check semiconductor chatter',
+ },
+ ],
+ loading: false,
+ error: null,
+ sending: true,
+ activeRunId: 'run-starting',
+ streamingText: '',
+ streamingMessage: null,
+ streamingTools: [],
+ pendingFinal: false,
+ lastUserMessageAt: Date.now(),
+ pendingToolImages: [],
+ sessions: [{ key: 'agent:main:main' }],
+ currentSessionKey: 'agent:main:main',
+ currentAgentId: 'main',
+ sessionLabels: {},
+ sessionLastActivity: {},
+ thinkingLevel: null,
+ });
+
+ const { Chat } = await import('@/pages/Chat/index');
+
+ render();
+
+ await waitFor(() => {
+ expect(screen.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'false');
+ });
+
+ expect(screen.getByTestId('chat-execution-step-thinking-trailing')).toBeInTheDocument();
+ expect(screen.getAllByText('Thinking').length).toBeGreaterThan(0);
+ });
+});
diff --git a/tests/unit/chat-store-history-retry.test.ts b/tests/unit/chat-store-history-retry.test.ts
index 31eac6458..dc315d0bc 100644
--- a/tests/unit/chat-store-history-retry.test.ts
+++ b/tests/unit/chat-store-history-retry.test.ts
@@ -63,7 +63,6 @@ describe('useChatStore startup history retry', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
gatewayRpcMock
@@ -115,7 +114,6 @@ describe('useChatStore startup history retry', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
gatewayRpcMock
@@ -162,7 +160,6 @@ describe('useChatStore startup history retry', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
let resolveFirstAttempt: ((value: { messages: Array<{ role: string; content: string; timestamp: number }> }) => void) | null = null;
@@ -242,7 +239,6 @@ describe('useChatStore startup history retry', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
gatewayRpcMock.mockImplementationOnce(async () => {
diff --git a/tests/unit/chat-target-routing.test.ts b/tests/unit/chat-target-routing.test.ts
index aa61bf15a..3d70a5c45 100644
--- a/tests/unit/chat-target-routing.test.ts
+++ b/tests/unit/chat-target-routing.test.ts
@@ -104,7 +104,6 @@ describe('chat target routing', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
await useChatStore.getState().sendMessage('Hello direct agent', undefined, 'research');
@@ -148,7 +147,6 @@ describe('chat target routing', () => {
error: null,
loading: false,
thinkingLevel: null,
- showThinking: true,
});
await useChatStore.getState().sendMessage(
diff --git a/tests/unit/task-visualization.test.ts b/tests/unit/task-visualization.test.ts
index 81a9f26f3..8032dadda 100644
--- a/tests/unit/task-visualization.test.ts
+++ b/tests/unit/task-visualization.test.ts
@@ -1,5 +1,6 @@
import { describe, expect, it } from 'vitest';
import { deriveTaskSteps, parseSubagentCompletionInfo } from '@/pages/Chat/task-visualization';
+import { stripProcessMessagePrefix } from '@/pages/Chat/message-utils';
import type { RawMessage, ToolStatus } from '@/stores/chat';
describe('deriveTaskSteps', () => {
@@ -23,14 +24,11 @@ describe('deriveTaskSteps', () => {
],
},
streamingTools,
- sending: true,
- pendingFinal: false,
- showThinking: true,
});
expect(steps).toEqual([
expect.objectContaining({
- id: 'stream-thinking',
+ id: 'stream-thinking-0',
label: 'Thinking',
status: 'running',
kind: 'thinking',
@@ -69,9 +67,6 @@ describe('deriveTaskSteps', () => {
summary: 'Scanning files',
},
],
- sending: true,
- pendingFinal: false,
- showThinking: false,
});
expect(steps).toEqual([
@@ -111,9 +106,6 @@ describe('deriveTaskSteps', () => {
summary: 'Permission denied',
},
],
- sending: true,
- pendingFinal: false,
- showThinking: false,
});
expect(steps).toEqual([
@@ -127,7 +119,7 @@ describe('deriveTaskSteps', () => {
]);
});
- it('keeps the newest running step when the execution graph exceeds the max length', () => {
+ it('keeps all steps when the execution graph exceeds the previous max length', () => {
const messages: RawMessage[] = Array.from({ length: 9 }, (_, index) => ({
role: 'assistant',
id: `assistant-${index}`,
@@ -153,12 +145,14 @@ describe('deriveTaskSteps', () => {
summary: 'Scanning current workspace',
},
],
- sending: true,
- pendingFinal: false,
- showThinking: false,
});
- expect(steps).toHaveLength(8);
+ expect(steps).toHaveLength(10);
+ expect(steps[0]).toEqual(expect.objectContaining({
+ id: 'tool-0',
+ label: 'read_0',
+ status: 'completed',
+ }));
expect(steps.at(-1)).toEqual(expect.objectContaining({
id: 'tool-live',
label: 'grep_live',
@@ -182,14 +176,11 @@ describe('deriveTaskSteps', () => {
messages,
streamingMessage: null,
streamingTools: [],
- sending: false,
- pendingFinal: false,
- showThinking: true,
});
expect(steps).toEqual([
expect.objectContaining({
- id: 'history-thinking-assistant-1',
+ id: 'history-thinking-assistant-1-0',
label: 'Thinking',
status: 'completed',
kind: 'thinking',
@@ -203,31 +194,106 @@ describe('deriveTaskSteps', () => {
]);
});
- it('collapses cumulative streaming thinking details into the newest version', () => {
+ it('splits cumulative streaming thinking into separate execution steps', () => {
const steps = deriveTaskSteps({
messages: [],
streamingMessage: {
role: 'assistant',
content: [
- { type: 'thinking', thinking: 'thinking 1' },
- { type: 'thinking', thinking: 'thinking 1 2' },
- { type: 'thinking', thinking: 'thinking 1 2 3' },
+ { type: 'thinking', thinking: 'Reviewing X.' },
+ { type: 'thinking', thinking: 'Reviewing X. Comparing Y.' },
+ { type: 'thinking', thinking: 'Reviewing X. Comparing Y. Drafting answer.' },
],
},
streamingTools: [],
- sending: true,
- pendingFinal: false,
- showThinking: true,
});
expect(steps).toEqual([
expect.objectContaining({
- id: 'stream-thinking',
- detail: 'thinking 1 2 3',
+ id: 'stream-thinking-0',
+ detail: 'Reviewing X.',
+ status: 'completed',
+ }),
+ expect.objectContaining({
+ id: 'stream-thinking-1',
+ detail: 'Comparing Y.',
+ status: 'completed',
+ }),
+ expect.objectContaining({
+ id: 'stream-thinking-2',
+ detail: 'Drafting answer.',
+ status: 'running',
}),
]);
});
+ it('keeps earlier reply segments in the graph when the last streaming segment is rendered separately', () => {
+ const steps = deriveTaskSteps({
+ messages: [],
+ streamingMessage: {
+ role: 'assistant',
+ content: [
+ { type: 'text', text: 'Checked X.' },
+ { type: 'text', text: 'Checked X. Checked Snowball.' },
+ { type: 'text', text: 'Checked X. Checked Snowball. Here is the summary.' },
+ ],
+ },
+ streamingTools: [],
+ omitLastStreamingMessageSegment: true,
+ });
+
+ expect(steps).toEqual([
+ expect.objectContaining({
+ id: 'stream-message-0',
+ detail: 'Checked X.',
+ status: 'completed',
+ }),
+ expect.objectContaining({
+ id: 'stream-message-1',
+ detail: 'Checked Snowball.',
+ status: 'completed',
+ }),
+ ]);
+ });
+
+ it('folds earlier reply segments into the graph but leaves the final answer for the chat bubble', () => {
+ const steps = deriveTaskSteps({
+ messages: [
+ {
+ role: 'assistant',
+ id: 'assistant-reply',
+ content: [
+ { type: 'text', text: 'Checked X.' },
+ { type: 'text', text: 'Checked X. Checked Snowball.' },
+ { type: 'text', text: 'Checked X. Checked Snowball. Here is the summary.' },
+ ],
+ },
+ ],
+ streamingMessage: null,
+ streamingTools: [],
+ });
+
+ expect(steps).toEqual([
+ expect.objectContaining({
+ id: 'history-message-assistant-reply-0',
+ detail: 'Checked X.',
+ status: 'completed',
+ }),
+ expect.objectContaining({
+ id: 'history-message-assistant-reply-1',
+ detail: 'Checked Snowball.',
+ status: 'completed',
+ }),
+ ]);
+ });
+
+ it('strips folded process narration from the final reply text', () => {
+ expect(stripProcessMessagePrefix(
+ 'Checked X. Checked Snowball. Here is the summary.',
+ ['Checked X.', 'Checked Snowball.'],
+ )).toBe('Here is the summary.');
+ });
+
it('builds a branch for spawned subagents', () => {
const messages: RawMessage[] = [
{
@@ -254,9 +320,6 @@ describe('deriveTaskSteps', () => {
messages,
streamingMessage: null,
streamingTools: [],
- sending: false,
- pendingFinal: false,
- showThinking: true,
});
expect(steps).toEqual([