Refine chat tool status dedupe (#786)

Co-authored-by: zuolingxuan <zuolingxuan@bytedance.com>
This commit is contained in:
Lingxuan Zuo
2026-04-08 15:05:27 +08:00
committed by GitHub
Unverified
parent 32d14b8cf9
commit 25b13ab912
11 changed files with 328 additions and 78 deletions

View File

@@ -18,6 +18,7 @@ interface ChatMessageProps {
message: RawMessage;
showThinking: boolean;
suppressToolCards?: boolean;
suppressProcessAttachments?: boolean;
isStreaming?: boolean;
streamingTools?: Array<{
id?: string;
@@ -42,6 +43,7 @@ export const ChatMessage = memo(function ChatMessage({
message,
showThinking,
suppressToolCards = false,
suppressProcessAttachments = false,
isStreaming = false,
streamingTools = [],
}: ChatMessageProps) {
@@ -55,8 +57,12 @@ export const ChatMessage = memo(function ChatMessage({
const tools = extractToolUse(message);
const visibleThinking = showThinking ? thinking : null;
const visibleTools = suppressToolCards ? [] : tools;
const shouldHideProcessAttachments = suppressProcessAttachments
&& (hasText || !!visibleThinking || images.length > 0 || visibleTools.length > 0);
const attachedFiles = message._attachedFiles || [];
const attachedFiles = shouldHideProcessAttachments
? (message._attachedFiles || []).filter((file) => file.source !== 'tool-result')
: (message._attachedFiles || []);
const [lightboxImg, setLightboxImg] = useState<{ src: string; fileName: string; filePath?: string; base64?: string; mimeType?: string } | null>(null);
// Never render tool result messages in chat UI

View File

@@ -221,7 +221,7 @@ export function Chat() {
active: isLatestOpenRun,
agentLabel: segmentAgentLabel,
sessionLabel: segmentSessionLabel,
segmentEnd: replyIndex ?? (nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1),
segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
steps,
}];
});
@@ -257,6 +257,7 @@ export function Chat() {
message={msg}
showThinking={showThinking}
suppressToolCards={suppressToolCards}
suppressProcessAttachments={suppressToolCards}
/>
{userRunCards
.filter((card) => card.triggerIndex === idx)

View File

@@ -166,23 +166,64 @@ export function deriveTaskSteps({
showThinking,
}: DeriveTaskStepsInput): TaskStep[] {
const steps: TaskStep[] = [];
const seenIds = new Set<string>();
const activeToolNames = new Set<string>();
const stepIndexById = new Map<string, number>();
const pushStep = (step: TaskStep): void => {
if (seenIds.has(step.id)) return;
seenIds.add(step.id);
steps.push(step);
const upsertStep = (step: TaskStep): void => {
const existingIndex = stepIndexById.get(step.id);
if (existingIndex == null) {
stepIndexById.set(step.id, steps.length);
steps.push(step);
return;
}
const existing = steps[existingIndex];
steps[existingIndex] = {
...existing,
...step,
detail: step.detail ?? existing.detail,
};
};
const streamMessage = streamingMessage && typeof streamingMessage === 'object'
? streamingMessage as RawMessage
: null;
const relevantAssistantMessages = messages.filter((message) => {
if (!message || message.role !== 'assistant') return false;
if (extractToolUse(message).length > 0) return true;
return showThinking && !!extractThinking(message);
});
for (const [messageIndex, assistantMessage] of relevantAssistantMessages.entries()) {
if (showThinking) {
const thinking = extractThinking(assistantMessage);
if (thinking) {
upsertStep({
id: `history-thinking-${assistantMessage.id || messageIndex}`,
label: 'Thinking',
status: 'completed',
kind: 'thinking',
detail: normalizeText(thinking),
depth: 1,
});
}
}
extractToolUse(assistantMessage).forEach((tool, index) => {
upsertStep({
id: tool.id || makeToolId(`history-tool-${assistantMessage.id || messageIndex}`, tool.name, index),
label: tool.name,
status: 'completed',
kind: 'tool',
detail: normalizeText(JSON.stringify(tool.input, null, 2)),
depth: 1,
});
});
}
if (streamMessage && showThinking) {
const thinking = extractThinking(streamMessage);
if (thinking) {
pushStep({
upsertStep({
id: 'stream-thinking',
label: 'Thinking',
status: 'running',
@@ -193,10 +234,16 @@ export function deriveTaskSteps({
}
}
const activeToolIds = new Set<string>();
const activeToolNamesWithoutIds = new Set<string>();
streamingTools.forEach((tool, index) => {
activeToolNames.add(tool.name);
pushStep({
id: tool.toolCallId || tool.id || makeToolId('stream-status', tool.name, index),
const id = tool.toolCallId || tool.id || makeToolId('stream-status', tool.name, index);
activeToolIds.add(id);
if (!tool.toolCallId && !tool.id) {
activeToolNamesWithoutIds.add(tool.name);
}
upsertStep({
id,
label: tool.name,
status: tool.status,
kind: 'tool',
@@ -207,9 +254,10 @@ export function deriveTaskSteps({
if (streamMessage) {
extractToolUse(streamMessage).forEach((tool, index) => {
if (activeToolNames.has(tool.name)) return;
pushStep({
id: tool.id || makeToolId('stream-tool', tool.name, index),
const id = tool.id || makeToolId('stream-tool', tool.name, index);
if (activeToolIds.has(id) || activeToolNamesWithoutIds.has(tool.name)) return;
upsertStep({
id,
label: tool.name,
status: 'running',
kind: 'tool',
@@ -220,59 +268,27 @@ export function deriveTaskSteps({
}
if (sending && pendingFinal) {
pushStep({
id: 'system-finalizing',
label: 'Finalizing answer',
status: 'running',
upsertStep({
id: 'system-finalizing',
label: 'Finalizing answer',
status: 'running',
kind: 'system',
detail: 'Waiting for the assistant to finish this run.',
depth: 1,
});
} else if (sending && steps.length === 0) {
pushStep({
id: 'system-preparing',
label: 'Preparing run',
status: 'running',
upsertStep({
id: 'system-preparing',
label: 'Preparing run',
status: 'running',
kind: 'system',
detail: 'Waiting for the first streaming update.',
depth: 1,
});
}
if (steps.length === 0) {
const relevantAssistantMessages = messages.filter((message) => {
if (!message || message.role !== 'assistant') return false;
if (extractToolUse(message).length > 0) return true;
return showThinking && !!extractThinking(message);
});
for (const [messageIndex, assistantMessage] of relevantAssistantMessages.entries()) {
if (showThinking) {
const thinking = extractThinking(assistantMessage);
if (thinking) {
pushStep({
id: `history-thinking-${assistantMessage.id || messageIndex}`,
label: 'Thinking',
status: 'completed',
kind: 'thinking',
detail: normalizeText(thinking),
depth: 1,
});
}
}
extractToolUse(assistantMessage).forEach((tool, index) => {
pushStep({
id: tool.id || makeToolId(`history-tool-${assistantMessage.id || messageIndex}`, tool.name, index),
label: tool.name,
status: 'completed',
kind: 'tool',
detail: normalizeText(JSON.stringify(tool.input, null, 2)),
depth: 1,
});
});
}
}
return attachTopology(steps).slice(0, MAX_TASK_STEPS);
const withTopology = attachTopology(steps);
return withTopology.length > MAX_TASK_STEPS
? withTopology.slice(-MAX_TASK_STEPS)
: withTopology;
}

View File

@@ -75,6 +75,13 @@ function upsertImageCacheEntry(filePath: string, file: Omit<AttachedFileMeta, 'f
saveImageCache(_imageCache);
}
/**
 * Tag an attached file with a provenance source.
 *
 * Files that already carry a source are returned unchanged (first tag wins);
 * untagged files get a shallow copy with the given source applied, so the
 * original object is never mutated.
 */
function withAttachedFileSource(
  file: AttachedFileMeta,
  source: AttachedFileMeta['source'],
): AttachedFileMeta {
  if (file.source) {
    return file;
  }
  return { ...file, source };
}
/** Extract plain text from message content (string or content blocks) */
function getMessageText(content: unknown): string {
if (typeof content === 'string') return content;
@@ -228,11 +235,14 @@ function extractImagesAsAttachedFiles(content: unknown): AttachedFileMeta[] {
/**
* Build an AttachedFileMeta entry for a file ref, using cache if available.
*/
function makeAttachedFile(ref: { filePath: string; mimeType: string }): AttachedFileMeta {
function makeAttachedFile(
ref: { filePath: string; mimeType: string },
source: AttachedFileMeta['source'] = 'message-ref',
): AttachedFileMeta {
const cached = _imageCache.get(ref.filePath);
if (cached) return { ...cached, filePath: ref.filePath };
if (cached) return { ...cached, filePath: ref.filePath, source };
const fileName = ref.filePath.split(/[\\/]/).pop() || 'file';
return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath };
return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath, source };
}
/**
@@ -345,7 +355,7 @@ function enrichWithToolResultFiles(messages: RawMessage[]): RawMessage[] {
}
}
}
pending.push(...imageFiles);
pending.push(...imageFiles.map((file) => withAttachedFileSource(file, 'tool-result')));
// 2. [media attached: ...] patterns in tool result text output
const text = getMessageText(msg.content);
@@ -353,12 +363,12 @@ function enrichWithToolResultFiles(messages: RawMessage[]): RawMessage[] {
const mediaRefs = extractMediaRefs(text);
const mediaRefPaths = new Set(mediaRefs.map(r => r.filePath));
for (const ref of mediaRefs) {
pending.push(makeAttachedFile(ref));
pending.push(makeAttachedFile(ref, 'tool-result'));
}
// 3. Raw file paths in tool result text (documents, audio, video, etc.)
for (const ref of extractRawFilePaths(text)) {
if (!mediaRefPaths.has(ref.filePath)) {
pending.push(makeAttachedFile(ref));
pending.push(makeAttachedFile(ref, 'tool-result'));
}
}
}
@@ -435,9 +445,9 @@ function enrichWithCachedImages(messages: RawMessage[]): RawMessage[] {
const files: AttachedFileMeta[] = allRefs.map(ref => {
const cached = _imageCache.get(ref.filePath);
if (cached) return { ...cached, filePath: ref.filePath };
if (cached) return { ...cached, filePath: ref.filePath, source: 'message-ref' };
const fileName = ref.filePath.split(/[\\/]/).pop() || 'file';
return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath };
return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath, source: 'message-ref' };
});
return { ...msg, _attachedFiles: files };
});

View File

@@ -86,9 +86,8 @@ export function handleRuntimeEventState(
: undefined;
// Mirror enrichWithToolResultFiles: collect images + file refs for next assistant msg
const toolFiles: AttachedFileMeta[] = [
...extractImagesAsAttachedFiles(finalMsg.content),
];
const toolFiles: AttachedFileMeta[] = extractImagesAsAttachedFiles(finalMsg.content)
.map((file) => (file.source ? file : { ...file, source: 'tool-result' }));
if (matchedPath) {
for (const f of toolFiles) {
if (!f.filePath) {
@@ -101,9 +100,9 @@ export function handleRuntimeEventState(
if (text) {
const mediaRefs = extractMediaRefs(text);
const mediaRefPaths = new Set(mediaRefs.map(r => r.filePath));
for (const ref of mediaRefs) toolFiles.push(makeAttachedFile(ref));
for (const ref of mediaRefs) toolFiles.push(makeAttachedFile(ref, 'tool-result'));
for (const ref of extractRawFilePaths(text)) {
if (!mediaRefPaths.has(ref.filePath)) toolFiles.push(makeAttachedFile(ref));
if (!mediaRefPaths.has(ref.filePath)) toolFiles.push(makeAttachedFile(ref, 'tool-result'));
}
}
set((s) => {

View File

@@ -94,6 +94,7 @@ export function createRuntimeSendActions(set: ChatSet, get: ChatGet): Pick<Runti
fileSize: a.fileSize,
preview: a.preview,
filePath: a.stagedPath,
source: 'user-upload',
})),
};
set((s) => ({

View File

@@ -5,6 +5,7 @@ export interface AttachedFileMeta {
fileSize: number;
preview: string | null;
filePath?: string;
source?: 'user-upload' | 'tool-result' | 'message-ref';
}
/** Raw message from OpenClaw chat.history */

View File

@@ -95,6 +95,16 @@ status: completed successfully`,
{
role: 'assistant',
content: [{ type: 'text', text: '我让 coder 分析完了,下面是结论。' }],
_attachedFiles: [
{
fileName: 'CHECKLIST.md',
mimeType: 'text/markdown',
fileSize: 433,
preview: null,
filePath: '/Users/bytedance/.openclaw/workspace/CHECKLIST.md',
source: 'tool-result',
},
],
timestamp: Date.now(),
},
];
@@ -203,7 +213,13 @@ test.describe('ClawX chat execution graph', () => {
});
const page = await getStableWindow(app);
await page.reload();
try {
await page.reload();
} catch (error) {
if (!String(error).includes('ERR_FILE_NOT_FOUND')) {
throw error;
}
}
await expect(page.getByTestId('main-layout')).toBeVisible();
await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 });
await expect(
@@ -214,6 +230,7 @@ test.describe('ClawX chat execution graph', () => {
page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('exec', { exact: true }),
).toBeVisible();
await expect(page.locator('[data-testid="chat-execution-graph"]').getByText('我让 coder 去拆 ~/Velaria 当前未提交改动的核心块了,等它回来我直接给你结论。')).toBeVisible();
await expect(page.getByText('CHECKLIST.md')).toHaveCount(0);
} finally {
await closeElectronApp(app);
}

View File

@@ -0,0 +1,33 @@
import { describe, expect, it } from 'vitest';
import { render, screen } from '@testing-library/react';
import { ChatMessage } from '@/pages/Chat/ChatMessage';
import type { RawMessage } from '@/stores/chat';
describe('ChatMessage attachment dedupe', () => {
  it('keeps attachment-only assistant replies visible even when process attachments are suppressed', () => {
    // Assistant reply whose only payload is a single tool-result image:
    // no text, no thinking, no tool cards. Suppression must not hide it,
    // or the reply would render as an empty bubble.
    const message: RawMessage = {
      role: 'assistant',
      content: [],
      _attachedFiles: [
        {
          fileName: 'artifact.png',
          mimeType: 'image/png',
          fileSize: 0,
          preview: '/tmp/artifact.png',
          filePath: '/tmp/artifact.png',
          source: 'tool-result',
        },
      ],
    };

    // suppressProcessAttachments only filters tool-result files when the
    // message has other visible content; this message has none.
    render(<ChatMessage message={message} showThinking={false} suppressProcessAttachments />);

    expect(screen.getByAltText('artifact.png')).toBeInTheDocument();
  });
});

View File

@@ -11,13 +11,14 @@ const getToolCallFilePath = vi.fn(() => undefined);
const hasErrorRecoveryTimer = vi.fn(() => false);
const hasNonToolAssistantContent = vi.fn(() => true);
const isToolOnlyMessage = vi.fn(() => false);
const isToolResultRole = vi.fn((role: unknown) => role === 'toolresult');
const makeAttachedFile = vi.fn((ref: { filePath: string; mimeType: string }) => ({
const isToolResultRole = vi.fn((role: unknown) => role === 'toolresult' || role === 'toolResult' || role === 'tool_result');
const makeAttachedFile = vi.fn((ref: { filePath: string; mimeType: string }, source?: 'user-upload' | 'tool-result' | 'message-ref') => ({
fileName: ref.filePath.split('/').pop() || 'file',
mimeType: ref.mimeType,
fileSize: 0,
preview: null,
filePath: ref.filePath,
source,
}));
const setErrorRecoveryTimer = vi.fn();
const upsertToolStatuses = vi.fn((_current, updates) => updates);
@@ -128,6 +129,51 @@ describe('chat runtime event handlers', () => {
expect(h.read().loadHistory).toHaveBeenCalledTimes(1);
});
// Provenance regression test: files harvested from a tool result must be
// tagged source: 'tool-result' when they are re-attached to the next final
// assistant message, so the chat UI can later dedupe them.
it('marks tool-result attachments before appending them to the final assistant reply', async () => {
// Stub extraction so the tool result appears to reference one markdown file.
extractMediaRefs.mockReturnValue([{ filePath: '/tmp/CHECKLIST.md', mimeType: 'text/markdown' }]);
getMessageText.mockReturnValue('[media attached: /tmp/CHECKLIST.md (text/markdown) | /tmp/CHECKLIST.md]');
hasNonToolAssistantContent.mockReturnValue(true);
// Dynamic import so the module picks up the mocks registered above.
const { handleRuntimeEventState } = await import('@/stores/chat/runtime-event-handlers');
const h = makeHarness({
pendingToolImages: [],
streamingMessage: {
role: 'assistant',
id: 'streaming-assistant',
content: [{ type: 'tool_use', id: 'call-1', name: 'read', input: { filePath: '/tmp/CHECKLIST.md' } }],
},
});
// First event: the tool result that references the file.
handleRuntimeEventState(h.set as never, h.get as never, {
message: {
role: 'toolResult',
toolCallId: 'call-1',
toolName: 'read',
content: [{ type: 'text', text: '[media attached: /tmp/CHECKLIST.md (text/markdown) | /tmp/CHECKLIST.md]' }],
},
}, 'final', 'run-4');
// Second event: the final assistant reply that should inherit the file.
handleRuntimeEventState(h.set as never, h.get as never, {
message: {
role: 'assistant',
id: 'final-assistant',
content: [{ type: 'text', text: 'Done.' }],
},
}, 'final', 'run-4');
// The attachment must land on the final reply, tagged with its provenance.
expect(h.read().messages).toEqual(expect.arrayContaining([
expect.objectContaining({
id: 'final-assistant',
_attachedFiles: [
expect.objectContaining({
filePath: '/tmp/CHECKLIST.md',
source: 'tool-result',
}),
],
}),
]));
});
it('handles error event and finalizes immediately when not sending', async () => {
const { handleRuntimeEventState } = await import('@/stores/chat/runtime-event-handlers');
const h = makeHarness({ sending: false, activeRunId: 'r1', lastUserMessageAt: 123 });
@@ -203,4 +249,3 @@ describe('chat runtime event handlers', () => {
expect(next.pendingToolImages).toEqual([]);
});
});

View File

@@ -43,6 +43,129 @@ describe('deriveTaskSteps', () => {
]);
});
// History steps must no longer be dropped while a run is live: a completed
// tool from message history renders alongside the currently streaming tool.
it('keeps completed tool steps visible while a later tool is still streaming', () => {
const steps = deriveTaskSteps({
// One finished assistant message in history with a completed tool call.
messages: [
{
role: 'assistant',
id: 'assistant-history',
content: [
{ type: 'tool_use', id: 'tool-read', name: 'read', input: { filePath: '/tmp/a.md' } },
],
},
],
// A second tool call is still mid-stream.
streamingMessage: {
role: 'assistant',
content: [
{ type: 'tool_use', id: 'tool-grep', name: 'grep', input: { pattern: 'TODO' } },
],
},
streamingTools: [
{
toolCallId: 'tool-grep',
name: 'grep',
status: 'running',
updatedAt: Date.now(),
summary: 'Scanning files',
},
],
sending: true,
pendingFinal: false,
showThinking: false,
});
// Both steps appear, in order: historical (completed) then streaming (running).
expect(steps).toEqual([
expect.objectContaining({
id: 'tool-read',
label: 'read',
status: 'completed',
kind: 'tool',
}),
expect.objectContaining({
id: 'tool-grep',
label: 'grep',
status: 'running',
kind: 'tool',
}),
]);
});
// When history and streaming status report the same tool id, the streaming
// entry must upsert (overwrite) the historical step rather than duplicate it.
it('upgrades a completed historical tool step when streaming status reports a later state', () => {
const steps = deriveTaskSteps({
// History says tool-read completed...
messages: [
{
role: 'assistant',
id: 'assistant-history',
content: [
{ type: 'tool_use', id: 'tool-read', name: 'read', input: { filePath: '/tmp/a.md' } },
],
},
],
streamingMessage: null,
// ...but the live status feed reports the same call id as errored.
streamingTools: [
{
toolCallId: 'tool-read',
name: 'read',
status: 'error',
updatedAt: Date.now(),
summary: 'Permission denied',
},
],
sending: true,
pendingFinal: false,
showThinking: false,
});
// Exactly one step survives, carrying the streaming status and detail.
expect(steps).toEqual([
expect.objectContaining({
id: 'tool-read',
label: 'read',
status: 'error',
kind: 'tool',
detail: 'Permission denied',
}),
]);
});
// Truncation direction: when the graph overflows, the OLDEST steps are
// trimmed so the live (running) step is never cut off the end.
it('keeps the newest running step when the execution graph exceeds the max length', () => {
// Nine completed historical tool steps plus one live step = 10 candidates.
const messages: RawMessage[] = Array.from({ length: 9 }, (_, index) => ({
role: 'assistant',
id: `assistant-${index}`,
content: [
{ type: 'tool_use', id: `tool-${index}`, name: `read_${index}`, input: { filePath: `/tmp/${index}.md` } },
],
}));
const steps = deriveTaskSteps({
messages,
streamingMessage: {
role: 'assistant',
content: [
{ type: 'tool_use', id: 'tool-live', name: 'grep_live', input: { pattern: 'TODO' } },
],
},
streamingTools: [
{
toolCallId: 'tool-live',
name: 'grep_live',
status: 'running',
updatedAt: Date.now(),
summary: 'Scanning current workspace',
},
],
sending: true,
pendingFinal: false,
showThinking: false,
});
// NOTE(review): asserts the cap is 8 — presumably MAX_TASK_STEPS === 8 in
// deriveTaskSteps; confirm this stays in sync if the constant changes.
expect(steps).toHaveLength(8);
// The live step must be the last (newest) entry after truncation.
expect(steps.at(-1)).toEqual(expect.objectContaining({
id: 'tool-live',
label: 'grep_live',
status: 'running',
}));
});
it('keeps recent completed steps from assistant history', () => {
const messages: RawMessage[] = [
{
@@ -70,14 +193,12 @@ describe('deriveTaskSteps', () => {
label: 'Thinking',
status: 'completed',
kind: 'thinking',
depth: 1,
}),
expect.objectContaining({
id: 'tool-2',
label: 'read_file',
status: 'completed',
kind: 'tool',
depth: 1,
}),
]);
});