1414 lines
52 KiB
TypeScript
1414 lines
52 KiB
TypeScript
/**
|
|
* Chat State Store
|
|
* Manages chat messages, sessions, streaming, and thinking state.
|
|
* Communicates with OpenClaw Gateway via gateway:rpc IPC.
|
|
*/
|
|
import { create } from 'zustand';
|
|
|
|
// ── Types ────────────────────────────────────────────────────────
|
|
|
|
/** Metadata for locally-attached files (not from Gateway) */
export interface AttachedFileMeta {
  /** Display name (basename of the path when derived from a file ref). */
  fileName: string;
  /** MIME type, e.g. "image/png"; drives preview vs. file-card rendering. */
  mimeType: string;
  /** Size in bytes; 0 when unknown (filled lazily by loadMissingPreviews). */
  fileSize: number;
  /** Inline preview: data URL or remote URL; null until loaded from disk. */
  preview: string | null;
  /** Absolute path on disk when known; also the key into the local image cache. */
  filePath?: string;
}
|
|
|
|
/** Raw message from OpenClaw chat.history */
export interface RawMessage {
  role: 'user' | 'assistant' | 'system' | 'toolresult';
  content: unknown; // string | ContentBlock[]
  timestamp?: number;
  id?: string;
  /** For toolresult messages: id of the tool call this result answers. */
  toolCallId?: string;
  /** For toolresult messages: name of the tool that produced the result. */
  toolName?: string;
  /** Provider extras read by extractToolResultUpdate (status, durationMs, aggregated, error). */
  details?: unknown;
  isError?: boolean;
  /** Local-only: file metadata for user-uploaded attachments (not sent to/from Gateway) */
  _attachedFiles?: AttachedFileMeta[];
}
|
|
|
|
/** Content block inside a message */
export interface ContentBlock {
  /** Tool blocks appear in both snake_case and camelCase spellings depending on provider. */
  type: 'text' | 'image' | 'thinking' | 'tool_use' | 'tool_result' | 'toolCall' | 'toolResult';
  text?: string;
  thinking?: string;
  /** Anthropic-style image wrapper: type is 'base64' (with data) or 'url' (with url). */
  source?: { type: string; media_type?: string; data?: string; url?: string };
  /** Flat image format from Gateway tool results (no source wrapper) */
  data?: string;
  /** MIME type accompanying the flat `data` field. */
  mimeType?: string;
  /** Tool call/result id used to correlate calls with their results. */
  id?: string;
  /** Tool name for tool_use/toolCall blocks. */
  name?: string;
  /** Tool call arguments (one naming variant). */
  input?: unknown;
  /** Tool call arguments (alternate naming variant). */
  arguments?: unknown;
  /** Nested content for tool_result blocks (may itself contain image blocks). */
  content?: unknown;
}
|
|
|
|
/** Session from sessions.list */
export interface ChatSession {
  /** Session key — canonical form is "agent:<id>:<suffix>"; short (suffix-only) keys also occur. */
  key: string;
  label?: string;
  displayName?: string;
  thinkingLevel?: string;
  model?: string;
}
|
|
|
|
/** Live status of one tool invocation, shown while a run is streaming. */
export interface ToolStatus {
  /** Stable identity for upserts; falls back to toolCallId or name. */
  id?: string;
  toolCallId?: string;
  name: string;
  status: 'running' | 'completed' | 'error';
  durationMs?: number;
  /** Short one-line digest of tool output (see summarizeToolOutput). */
  summary?: string;
  /** Last-update timestamp (ms since epoch). */
  updatedAt: number;
}
|
|
|
|
/** Zustand store shape: chat state plus the actions that mutate it. */
interface ChatState {
  // Messages
  messages: RawMessage[];
  loading: boolean;
  error: string | null;

  // Streaming
  sending: boolean;
  activeRunId: string | null;
  streamingText: string;
  streamingMessage: unknown | null;
  streamingTools: ToolStatus[];
  pendingFinal: boolean;
  lastUserMessageAt: number | null;
  /** Images collected from tool results, attached to the next assistant message */
  pendingToolImages: AttachedFileMeta[];

  // Sessions
  sessions: ChatSession[];
  currentSessionKey: string;

  // Thinking
  showThinking: boolean;
  thinkingLevel: string | null;

  // Actions
  /** Fetch sessions via the sessions.list RPC and dedupe short vs canonical keys. */
  loadSessions: () => Promise<void>;
  switchSession: (key: string) => void;
  newSession: () => void;
  // NOTE(review): `quiet` presumably suppresses the loading flag — confirm against implementation
  loadHistory: (quiet?: boolean) => Promise<void>;
  sendMessage: (text: string, attachments?: Array<{ fileName: string; mimeType: string; fileSize: number; stagedPath: string; preview: string | null }>) => Promise<void>;
  abortRun: () => Promise<void>;
  /** Dispatch handler for incoming gateway chat stream events. */
  handleChatEvent: (event: Record<string, unknown>) => void;
  toggleThinking: () => void;
  refresh: () => Promise<void>;
  clearError: () => void;
}
|
|
|
|
// Fallback canonical prefix/key used before sessions.list resolves the real ones.
const DEFAULT_CANONICAL_PREFIX = 'agent:main';
const DEFAULT_SESSION_KEY = `${DEFAULT_CANONICAL_PREFIX}:main`;
|
|
|
|
// ── Local image cache ─────────────────────────────────────────
|
|
// The Gateway doesn't store image attachments in session content blocks,
|
|
// so we cache them locally keyed by staged file path (which appears in the
|
|
// [media attached: <path> ...] reference in the Gateway's user message text).
|
|
// Keying by path avoids the race condition of keying by runId (which is only
|
|
// available after the RPC returns, but history may load before that).
|
|
// localStorage key under which the image cache is persisted across reloads.
const IMAGE_CACHE_KEY = 'clawx:image-cache';
const IMAGE_CACHE_MAX = 100; // max entries to prevent unbounded growth
|
|
|
|
function loadImageCache(): Map<string, AttachedFileMeta> {
|
|
try {
|
|
const raw = localStorage.getItem(IMAGE_CACHE_KEY);
|
|
if (raw) {
|
|
const entries = JSON.parse(raw) as Array<[string, AttachedFileMeta]>;
|
|
return new Map(entries);
|
|
}
|
|
} catch { /* ignore parse errors */ }
|
|
return new Map();
|
|
}
|
|
|
|
function saveImageCache(cache: Map<string, AttachedFileMeta>): void {
|
|
try {
|
|
// Evict oldest entries if over limit
|
|
const entries = Array.from(cache.entries());
|
|
const trimmed = entries.length > IMAGE_CACHE_MAX
|
|
? entries.slice(entries.length - IMAGE_CACHE_MAX)
|
|
: entries;
|
|
localStorage.setItem(IMAGE_CACHE_KEY, JSON.stringify(trimmed));
|
|
} catch { /* ignore quota errors */ }
|
|
}
|
|
|
|
// Module-level cache instance, hydrated once at load; persisted via saveImageCache.
const _imageCache = loadImageCache();
|
|
|
|
/** Extract plain text from message content (string or content blocks) */
|
|
function getMessageText(content: unknown): string {
|
|
if (typeof content === 'string') return content;
|
|
if (Array.isArray(content)) {
|
|
return (content as Array<{ type?: string; text?: string }>)
|
|
.filter(b => b.type === 'text' && b.text)
|
|
.map(b => b.text!)
|
|
.join('\n');
|
|
}
|
|
return '';
|
|
}
|
|
|
|
/** Extract media file refs from [media attached: <path> (<mime>) | ...] patterns */
|
|
function extractMediaRefs(text: string): Array<{ filePath: string; mimeType: string }> {
|
|
const refs: Array<{ filePath: string; mimeType: string }> = [];
|
|
const regex = /\[media attached:\s*([^\s(]+)\s*\(([^)]+)\)\s*\|[^\]]*\]/g;
|
|
let match;
|
|
while ((match = regex.exec(text)) !== null) {
|
|
refs.push({ filePath: match[1], mimeType: match[2] });
|
|
}
|
|
return refs;
|
|
}
|
|
|
|
/** Map common file extensions to MIME types */
|
|
function mimeFromExtension(filePath: string): string {
|
|
const ext = filePath.split('.').pop()?.toLowerCase() || '';
|
|
const map: Record<string, string> = {
|
|
// Images
|
|
'png': 'image/png',
|
|
'jpg': 'image/jpeg',
|
|
'jpeg': 'image/jpeg',
|
|
'gif': 'image/gif',
|
|
'webp': 'image/webp',
|
|
'bmp': 'image/bmp',
|
|
'avif': 'image/avif',
|
|
'svg': 'image/svg+xml',
|
|
// Documents
|
|
'pdf': 'application/pdf',
|
|
'doc': 'application/msword',
|
|
'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
|
|
'xls': 'application/vnd.ms-excel',
|
|
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
|
'ppt': 'application/vnd.ms-powerpoint',
|
|
'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
|
|
'txt': 'text/plain',
|
|
'csv': 'text/csv',
|
|
'md': 'text/markdown',
|
|
'rtf': 'application/rtf',
|
|
'epub': 'application/epub+zip',
|
|
// Archives
|
|
'zip': 'application/zip',
|
|
'tar': 'application/x-tar',
|
|
'gz': 'application/gzip',
|
|
'rar': 'application/vnd.rar',
|
|
'7z': 'application/x-7z-compressed',
|
|
// Audio
|
|
'mp3': 'audio/mpeg',
|
|
'wav': 'audio/wav',
|
|
'ogg': 'audio/ogg',
|
|
'aac': 'audio/aac',
|
|
'flac': 'audio/flac',
|
|
'm4a': 'audio/mp4',
|
|
// Video
|
|
'mp4': 'video/mp4',
|
|
'mov': 'video/quicktime',
|
|
'avi': 'video/x-msvideo',
|
|
'mkv': 'video/x-matroska',
|
|
'webm': 'video/webm',
|
|
'm4v': 'video/mp4',
|
|
};
|
|
return map[ext] || 'application/octet-stream';
|
|
}
|
|
|
|
/**
|
|
* Extract raw file paths from message text.
|
|
* Detects absolute paths (Unix: / or ~/, Windows: C:\ etc.) ending with common file extensions.
|
|
* Handles both image and non-image files, consistent with channel push message behavior.
|
|
*/
|
|
function extractRawFilePaths(text: string): Array<{ filePath: string; mimeType: string }> {
|
|
const refs: Array<{ filePath: string; mimeType: string }> = [];
|
|
const seen = new Set<string>();
|
|
const exts = 'png|jpe?g|gif|webp|bmp|avif|svg|pdf|docx?|xlsx?|pptx?|txt|csv|md|rtf|epub|zip|tar|gz|rar|7z|mp3|wav|ogg|aac|flac|m4a|mp4|mov|avi|mkv|webm|m4v';
|
|
// Unix absolute paths (/... or ~/...)
|
|
const unixRegex = new RegExp(`((?:\\/|~\\/)[^\\s\\n"'()\\[\\],<>]*?\\.(?:${exts}))`, 'gi');
|
|
// Windows absolute paths (C:\... D:\...)
|
|
const winRegex = new RegExp(`([A-Za-z]:\\\\[^\\s\\n"'()\\[\\],<>]*?\\.(?:${exts}))`, 'gi');
|
|
for (const regex of [unixRegex, winRegex]) {
|
|
let match;
|
|
while ((match = regex.exec(text)) !== null) {
|
|
const p = match[1];
|
|
if (p && !seen.has(p)) {
|
|
seen.add(p);
|
|
refs.push({ filePath: p, mimeType: mimeFromExtension(p) });
|
|
}
|
|
}
|
|
}
|
|
return refs;
|
|
}
|
|
|
|
/**
|
|
* Extract images from a content array (including nested tool_result content).
|
|
* Converts them to AttachedFileMeta entries with preview set to data URL or remote URL.
|
|
*/
|
|
function extractImagesAsAttachedFiles(content: unknown): AttachedFileMeta[] {
|
|
if (!Array.isArray(content)) return [];
|
|
const files: AttachedFileMeta[] = [];
|
|
|
|
for (const block of content as ContentBlock[]) {
|
|
if (block.type === 'image') {
|
|
// Path 1: Anthropic source-wrapped format {source: {type, media_type, data}}
|
|
if (block.source) {
|
|
const src = block.source;
|
|
const mimeType = src.media_type || 'image/jpeg';
|
|
|
|
if (src.type === 'base64' && src.data) {
|
|
files.push({
|
|
fileName: 'image',
|
|
mimeType,
|
|
fileSize: 0,
|
|
preview: `data:${mimeType};base64,${src.data}`,
|
|
});
|
|
} else if (src.type === 'url' && src.url) {
|
|
files.push({
|
|
fileName: 'image',
|
|
mimeType,
|
|
fileSize: 0,
|
|
preview: src.url,
|
|
});
|
|
}
|
|
}
|
|
// Path 2: Flat format from Gateway tool results {data, mimeType}
|
|
else if (block.data) {
|
|
const mimeType = block.mimeType || 'image/jpeg';
|
|
files.push({
|
|
fileName: 'image',
|
|
mimeType,
|
|
fileSize: 0,
|
|
preview: `data:${mimeType};base64,${block.data}`,
|
|
});
|
|
}
|
|
}
|
|
// Recurse into tool_result content blocks
|
|
if ((block.type === 'tool_result' || block.type === 'toolResult') && block.content) {
|
|
files.push(...extractImagesAsAttachedFiles(block.content));
|
|
}
|
|
}
|
|
return files;
|
|
}
|
|
|
|
/**
|
|
* Build an AttachedFileMeta entry for a file ref, using cache if available.
|
|
*/
|
|
function makeAttachedFile(ref: { filePath: string; mimeType: string }): AttachedFileMeta {
|
|
const cached = _imageCache.get(ref.filePath);
|
|
if (cached) return { ...cached, filePath: ref.filePath };
|
|
const fileName = ref.filePath.split(/[\\/]/).pop() || 'file';
|
|
return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath };
|
|
}
|
|
|
|
/**
 * Extract file path from a tool call's arguments by toolCallId.
 * Searches common argument names: file_path, filePath, path, file.
 * Checks Anthropic-style content blocks first, then OpenAI-style tool_calls.
 * Returns undefined when no matching call carries a string path.
 */
function getToolCallFilePath(msg: RawMessage, toolCallId: string): string | undefined {
  if (!toolCallId) return undefined;

  // Anthropic/normalized format — toolCall blocks in content array
  const content = msg.content;
  if (Array.isArray(content)) {
    for (const block of content as ContentBlock[]) {
      if ((block.type === 'tool_use' || block.type === 'toolCall') && block.id === toolCallId) {
        const args = (block.input ?? block.arguments) as Record<string, unknown> | undefined;
        if (args) {
          const fp = args.file_path ?? args.filePath ?? args.path ?? args.file;
          if (typeof fp === 'string') return fp;
        }
      }
    }
  }

  // OpenAI format — tool_calls array on the message itself
  const msgAny = msg as unknown as Record<string, unknown>;
  const toolCalls = msgAny.tool_calls ?? msgAny.toolCalls;
  if (Array.isArray(toolCalls)) {
    for (const tc of toolCalls as Array<Record<string, unknown>>) {
      if (tc.id !== toolCallId) continue;
      const fn = (tc.function ?? tc) as Record<string, unknown>;
      let args: Record<string, unknown> | undefined;
      try {
        // OpenAI serializes arguments as a JSON string; normalized forms may inline an object.
        args = typeof fn.arguments === 'string' ? JSON.parse(fn.arguments) : (fn.arguments ?? fn.input) as Record<string, unknown>;
      } catch { /* ignore */ }
      if (args) {
        const fp = args.file_path ?? args.filePath ?? args.path ?? args.file;
        if (typeof fp === 'string') return fp;
      }
    }
  }

  return undefined;
}
|
|
|
|
/**
 * Collect all tool call file paths from a message into a Map<toolCallId, filePath>.
 * Same argument-name search as getToolCallFilePath (file_path, filePath, path,
 * file), but scans every tool call on the message instead of a single id.
 * Mutates `paths` in place; later calls can overwrite earlier entries for the same id.
 */
function collectToolCallPaths(msg: RawMessage, paths: Map<string, string>): void {
  const content = msg.content;
  // Anthropic/normalized format — tool blocks inside the content array
  if (Array.isArray(content)) {
    for (const block of content as ContentBlock[]) {
      if ((block.type === 'tool_use' || block.type === 'toolCall') && block.id) {
        const args = (block.input ?? block.arguments) as Record<string, unknown> | undefined;
        if (args) {
          const fp = args.file_path ?? args.filePath ?? args.path ?? args.file;
          if (typeof fp === 'string') paths.set(block.id, fp);
        }
      }
    }
  }
  // OpenAI format — tool_calls array on the message itself
  const msgAny = msg as unknown as Record<string, unknown>;
  const toolCalls = msgAny.tool_calls ?? msgAny.toolCalls;
  if (Array.isArray(toolCalls)) {
    for (const tc of toolCalls as Array<Record<string, unknown>>) {
      const id = typeof tc.id === 'string' ? tc.id : '';
      if (!id) continue;
      const fn = (tc.function ?? tc) as Record<string, unknown>;
      let args: Record<string, unknown> | undefined;
      try {
        // OpenAI serializes arguments as a JSON string; normalized forms may inline an object.
        args = typeof fn.arguments === 'string' ? JSON.parse(fn.arguments) : (fn.arguments ?? fn.input) as Record<string, unknown>;
      } catch { /* ignore */ }
      if (args) {
        const fp = args.file_path ?? args.filePath ?? args.path ?? args.file;
        if (typeof fp === 'string') paths.set(id, fp);
      }
    }
  }
}
|
|
|
|
/**
 * Before filtering tool_result messages from history, scan them for any file/image
 * content and attach those to the immediately following assistant message.
 * This mirrors channel push message behavior where tool outputs surface files to the UI.
 * Handles:
 *  - Image content blocks (base64 / url)
 *  - [media attached: path (mime) | path] text patterns in tool result output
 *  - Raw file paths in tool result text
 */
function enrichWithToolResultFiles(messages: RawMessage[]): RawMessage[] {
  // Files discovered in tool results, waiting for the next assistant message.
  const pending: AttachedFileMeta[] = [];
  // toolCallId -> file path, accumulated from assistant tool call arguments.
  const toolCallPaths = new Map<string, string>();

  return messages.map((msg) => {
    // Track file paths from assistant tool call arguments for later matching
    if (msg.role === 'assistant') {
      collectToolCallPaths(msg, toolCallPaths);
    }

    if (isToolResultRole(msg.role)) {
      // Resolve file path from the matching tool call
      const matchedPath = msg.toolCallId ? toolCallPaths.get(msg.toolCallId) : undefined;

      // 1. Image/file content blocks in the structured content array
      const imageFiles = extractImagesAsAttachedFiles(msg.content);
      if (matchedPath) {
        // Backfill path/name onto images that came without one (e.g. base64 blocks).
        for (const f of imageFiles) {
          if (!f.filePath) {
            f.filePath = matchedPath;
            f.fileName = matchedPath.split(/[\\/]/).pop() || 'image';
          }
        }
      }
      pending.push(...imageFiles);

      // 2. [media attached: ...] patterns in tool result text output
      const text = getMessageText(msg.content);
      if (text) {
        const mediaRefs = extractMediaRefs(text);
        const mediaRefPaths = new Set(mediaRefs.map(r => r.filePath));
        for (const ref of mediaRefs) {
          pending.push(makeAttachedFile(ref));
        }
        // 3. Raw file paths in tool result text (documents, audio, video, etc.)
        for (const ref of extractRawFilePaths(text)) {
          if (!mediaRefPaths.has(ref.filePath)) {
            pending.push(makeAttachedFile(ref));
          }
        }
      }

      return msg; // will be filtered later
    }

    if (msg.role === 'assistant' && pending.length > 0) {
      // Drain everything collected since the last assistant message.
      const toAttach = pending.splice(0);
      // Deduplicate against files already on the assistant message
      const existingPaths = new Set(
        (msg._attachedFiles || []).map(f => f.filePath).filter(Boolean),
      );
      const newFiles = toAttach.filter(f => !f.filePath || !existingPaths.has(f.filePath));
      if (newFiles.length === 0) return msg;
      return {
        ...msg,
        _attachedFiles: [...(msg._attachedFiles || []), ...newFiles],
      };
    }

    return msg;
  });
}
|
|
|
|
/**
 * Restore _attachedFiles for messages loaded from history.
 * Handles:
 *  1. [media attached: path (mime) | path] patterns (attachment-button flow)
 *  2. Raw image file paths typed in message text (e.g. /Users/.../image.png)
 * Uses local cache for previews when available; missing previews are loaded async.
 */
function enrichWithCachedImages(messages: RawMessage[]): RawMessage[] {
  return messages.map((msg, idx) => {
    // Only process user and assistant messages; skip if already enriched
    if ((msg.role !== 'user' && msg.role !== 'assistant') || msg._attachedFiles) return msg;
    const text = getMessageText(msg.content);

    // Path 1: [media attached: path (mime) | path] — guaranteed format from attachment button
    const mediaRefs = extractMediaRefs(text);
    const mediaRefPaths = new Set(mediaRefs.map(r => r.filePath));

    // Path 2: Raw file paths.
    // For assistant messages: scan own text AND the nearest preceding user message text,
    // but only for non-tool-only assistant messages (i.e. the final answer turn).
    // Tool-only messages (thinking + tool calls) should not show file previews — those
    // belong to the final answer message that comes after the tool results.
    // User messages never get raw-path previews so the image is not shown twice.
    let rawRefs: Array<{ filePath: string; mimeType: string }> = [];
    if (msg.role === 'assistant' && !isToolOnlyMessage(msg)) {
      // Own text
      rawRefs = extractRawFilePaths(text).filter(r => !mediaRefPaths.has(r.filePath));

      // Nearest preceding user message text (look back up to 5 messages)
      const seenPaths = new Set(rawRefs.map(r => r.filePath));
      for (let i = idx - 1; i >= Math.max(0, idx - 5); i--) {
        const prev = messages[i];
        if (!prev) break;
        if (prev.role === 'user') {
          const prevText = getMessageText(prev.content);
          for (const ref of extractRawFilePaths(prevText)) {
            if (!mediaRefPaths.has(ref.filePath) && !seenPaths.has(ref.filePath)) {
              seenPaths.add(ref.filePath);
              rawRefs.push(ref);
            }
          }
          break; // only use the nearest user message
        }
      }
    }

    const allRefs = [...mediaRefs, ...rawRefs];
    if (allRefs.length === 0) return msg;

    // Build metadata for each ref, preferring cached entries that carry previews.
    const files: AttachedFileMeta[] = allRefs.map(ref => {
      const cached = _imageCache.get(ref.filePath);
      if (cached) return { ...cached, filePath: ref.filePath };
      const fileName = ref.filePath.split(/[\\/]/).pop() || 'file';
      return { fileName, mimeType: ref.mimeType, fileSize: 0, preview: null, filePath: ref.filePath };
    });
    return { ...msg, _attachedFiles: files };
  });
}
|
|
|
|
/**
 * Async: load missing previews from disk via IPC for messages that have
 * _attachedFiles with null previews. Updates messages in-place and triggers re-render.
 * Handles both [media attached: ...] patterns and raw filePath entries.
 * Returns true when at least one file was updated (caller should re-render);
 * false when nothing needed loading or the IPC call failed.
 */
async function loadMissingPreviews(messages: RawMessage[]): Promise<boolean> {
  // Collect all image paths that need previews
  const needPreview: Array<{ filePath: string; mimeType: string }> = [];
  const seenPaths = new Set<string>();

  for (const msg of messages) {
    if (!msg._attachedFiles) continue;

    // Path 1: files with explicit filePath field (raw path detection or enriched refs)
    for (const file of msg._attachedFiles) {
      const fp = file.filePath;
      if (!fp || seenPaths.has(fp)) continue;
      // Images: need preview. Non-images: need file size (for FileCard display).
      const needsLoad = file.mimeType.startsWith('image/')
        ? !file.preview
        : file.fileSize === 0;
      if (needsLoad) {
        seenPaths.add(fp);
        needPreview.push({ filePath: fp, mimeType: file.mimeType });
      }
    }

    // Path 2: [media attached: ...] patterns (legacy — in case filePath wasn't stored)
    if (msg.role === 'user') {
      const text = getMessageText(msg.content);
      const refs = extractMediaRefs(text);
      // Attached files were created in ref order, so match them up by index.
      for (let i = 0; i < refs.length; i++) {
        const file = msg._attachedFiles[i];
        const ref = refs[i];
        if (!file || !ref || seenPaths.has(ref.filePath)) continue;
        const needsLoad = ref.mimeType.startsWith('image/') ? !file.preview : file.fileSize === 0;
        if (needsLoad) {
          seenPaths.add(ref.filePath);
          needPreview.push(ref);
        }
      }
    }
  }

  if (needPreview.length === 0) return false;

  try {
    // Main process reads the files and returns previews/sizes keyed by path.
    const thumbnails = await window.electron.ipcRenderer.invoke(
      'media:getThumbnails',
      needPreview,
    ) as Record<string, { preview: string | null; fileSize: number }>;

    let updated = false;
    for (const msg of messages) {
      if (!msg._attachedFiles) continue;

      // Update files that have filePath
      for (const file of msg._attachedFiles) {
        const fp = file.filePath;
        if (!fp) continue;
        const thumb = thumbnails[fp];
        if (thumb && (thumb.preview || thumb.fileSize)) {
          if (thumb.preview) file.preview = thumb.preview;
          if (thumb.fileSize) file.fileSize = thumb.fileSize;
          _imageCache.set(fp, { ...file });
          updated = true;
        }
      }

      // Legacy: update by index for [media attached: ...] refs
      if (msg.role === 'user') {
        const text = getMessageText(msg.content);
        const refs = extractMediaRefs(text);
        for (let i = 0; i < refs.length; i++) {
          const file = msg._attachedFiles[i];
          const ref = refs[i];
          if (!file || !ref || file.filePath) continue; // skip if already handled via filePath
          const thumb = thumbnails[ref.filePath];
          if (thumb && (thumb.preview || thumb.fileSize)) {
            if (thumb.preview) file.preview = thumb.preview;
            if (thumb.fileSize) file.fileSize = thumb.fileSize;
            _imageCache.set(ref.filePath, { ...file });
            updated = true;
          }
        }
      }
    }
    // Persist newly-learned previews/sizes for future history loads.
    if (updated) saveImageCache(_imageCache);
    return updated;
  } catch (err) {
    console.warn('[loadMissingPreviews] Failed:', err);
    return false;
  }
}
|
|
|
|
function getCanonicalPrefixFromSessions(sessions: ChatSession[]): string | null {
|
|
const canonical = sessions.find((s) => s.key.startsWith('agent:'))?.key;
|
|
if (!canonical) return null;
|
|
const parts = canonical.split(':');
|
|
if (parts.length < 2) return null;
|
|
return `${parts[0]}:${parts[1]}`;
|
|
}
|
|
|
|
/**
 * True when a message represents only tool activity (tool calls/results,
 * optionally with thinking) and carries no user-visible text or images.
 * Such messages are intermediate steps, not the final answer turn.
 */
function isToolOnlyMessage(message: RawMessage | undefined): boolean {
  if (!message) return false;
  // Tool results are tool-only by definition.
  if (isToolResultRole(message.role)) return true;

  const msg = message as unknown as Record<string, unknown>;
  const content = message.content;

  // Check OpenAI-format tool_calls field (real-time streaming from OpenAI-compatible models)
  const toolCalls = msg.tool_calls ?? msg.toolCalls;
  const hasOpenAITools = Array.isArray(toolCalls) && toolCalls.length > 0;

  if (!Array.isArray(content)) {
    // Content is not an array — check if there's OpenAI-format tool_calls
    if (hasOpenAITools) {
      // Has tool calls but content might be empty/string — treat as tool-only
      // if there's no meaningful text content
      const textContent = typeof content === 'string' ? content.trim() : '';
      return textContent.length === 0;
    }
    return false;
  }

  let hasTool = hasOpenAITools;
  let hasText = false;
  let hasNonToolContent = false;

  for (const block of content as ContentBlock[]) {
    if (block.type === 'tool_use' || block.type === 'tool_result' || block.type === 'toolCall' || block.type === 'toolResult') {
      hasTool = true;
      continue;
    }
    if (block.type === 'text' && block.text && block.text.trim()) {
      hasText = true;
      continue;
    }
    // Only actual image output disqualifies a tool-only message.
    // Thinking blocks are internal reasoning that can accompany tool_use — they
    // should NOT prevent the message from being treated as an intermediate tool step.
    if (block.type === 'image') {
      hasNonToolContent = true;
    }
  }

  return hasTool && !hasText && !hasNonToolContent;
}
|
|
|
|
function isToolResultRole(role: unknown): boolean {
|
|
if (!role) return false;
|
|
const normalized = String(role).toLowerCase();
|
|
return normalized === 'toolresult' || normalized === 'tool_result';
|
|
}
|
|
|
|
function extractTextFromContent(content: unknown): string {
|
|
if (typeof content === 'string') return content;
|
|
if (!Array.isArray(content)) return '';
|
|
const parts: string[] = [];
|
|
for (const block of content as ContentBlock[]) {
|
|
if (block.type === 'text' && block.text) {
|
|
parts.push(block.text);
|
|
}
|
|
}
|
|
return parts.join('\n');
|
|
}
|
|
|
|
function summarizeToolOutput(text: string): string | undefined {
|
|
const trimmed = text.trim();
|
|
if (!trimmed) return undefined;
|
|
const lines = trimmed.split(/\r?\n/).map((line) => line.trim()).filter(Boolean);
|
|
if (lines.length === 0) return undefined;
|
|
const summaryLines = lines.slice(0, 2);
|
|
let summary = summaryLines.join(' / ');
|
|
if (summary.length > 160) {
|
|
summary = `${summary.slice(0, 157)}...`;
|
|
}
|
|
return summary;
|
|
}
|
|
|
|
function normalizeToolStatus(rawStatus: unknown, fallback: 'running' | 'completed'): ToolStatus['status'] {
|
|
const status = typeof rawStatus === 'string' ? rawStatus.toLowerCase() : '';
|
|
if (status === 'error' || status === 'failed') return 'error';
|
|
if (status === 'completed' || status === 'success' || status === 'done') return 'completed';
|
|
return fallback;
|
|
}
|
|
|
|
function parseDurationMs(value: unknown): number | undefined {
|
|
if (typeof value === 'number' && Number.isFinite(value)) return value;
|
|
const parsed = typeof value === 'string' ? Number(value) : NaN;
|
|
return Number.isFinite(parsed) ? parsed : undefined;
|
|
}
|
|
|
|
/**
 * Derive "running" ToolStatus entries for every tool call present on a
 * streaming message. Checks Anthropic-style content blocks first and only
 * falls back to the OpenAI tool_calls array when no block yielded an update.
 */
function extractToolUseUpdates(message: unknown): ToolStatus[] {
  if (!message || typeof message !== 'object') return [];
  const msg = message as Record<string, unknown>;
  const updates: ToolStatus[] = [];

  // Path 1: Anthropic/normalized format — tool blocks inside content array
  const content = msg.content;
  if (Array.isArray(content)) {
    for (const block of content as ContentBlock[]) {
      if ((block.type !== 'tool_use' && block.type !== 'toolCall') || !block.name) continue;
      updates.push({
        id: block.id || block.name,
        toolCallId: block.id,
        name: block.name,
        status: 'running',
        updatedAt: Date.now(),
      });
    }
  }

  // Path 2: OpenAI format — tool_calls array on the message itself
  if (updates.length === 0) {
    const toolCalls = msg.tool_calls ?? msg.toolCalls;
    if (Array.isArray(toolCalls)) {
      for (const tc of toolCalls as Array<Record<string, unknown>>) {
        const fn = (tc.function ?? tc) as Record<string, unknown>;
        const name = typeof fn.name === 'string' ? fn.name : '';
        if (!name) continue;
        const id = typeof tc.id === 'string' ? tc.id : name;
        updates.push({
          id,
          toolCallId: typeof tc.id === 'string' ? tc.id : undefined,
          name,
          status: 'running',
          updatedAt: Date.now(),
        });
      }
    }
  }

  return updates;
}
|
|
|
|
/**
 * Derive ToolStatus entries from tool_result blocks embedded in a message's
 * content array. While the stream is still delta-ing ("delta" event state)
 * the fallback status is "running"; otherwise results are "completed".
 */
function extractToolResultBlocks(message: unknown, eventState: string): ToolStatus[] {
  if (!message || typeof message !== 'object') return [];
  const msg = message as Record<string, unknown>;
  const content = msg.content;
  if (!Array.isArray(content)) return [];

  const updates: ToolStatus[] = [];
  for (const block of content as ContentBlock[]) {
    if (block.type !== 'tool_result' && block.type !== 'toolResult') continue;
    // Summarize the block's nested content (falling back to its plain text).
    const outputText = extractTextFromContent(block.content ?? block.text ?? '');
    const summary = summarizeToolOutput(outputText);
    updates.push({
      id: block.id || block.name || 'tool',
      toolCallId: block.id,
      name: block.name || block.id || 'tool',
      status: normalizeToolStatus(undefined, eventState === 'delta' ? 'running' : 'completed'),
      summary,
      updatedAt: Date.now(),
    });
  }

  return updates;
}
|
|
|
|
/**
 * Build a ToolStatus from a toolresult-role message (top-level tool result).
 * Status/duration come from the message or its details object; the summary
 * prefers details.aggregated, then the message content, then any error text.
 * Returns null when the message's role is not a tool result.
 */
function extractToolResultUpdate(message: unknown, eventState: string): ToolStatus | null {
  if (!message || typeof message !== 'object') return null;
  const msg = message as Record<string, unknown>;
  const role = typeof msg.role === 'string' ? msg.role.toLowerCase() : '';
  if (!isToolResultRole(role)) return null;

  const toolName = typeof msg.toolName === 'string' ? msg.toolName : (typeof msg.name === 'string' ? msg.name : '');
  const toolCallId = typeof msg.toolCallId === 'string' ? msg.toolCallId : undefined;
  const details = (msg.details && typeof msg.details === 'object') ? msg.details as Record<string, unknown> : undefined;
  const rawStatus = (msg.status ?? details?.status);
  // Mid-stream deltas default to "running"; terminal events default to "completed".
  const fallback = eventState === 'delta' ? 'running' : 'completed';
  const status = normalizeToolStatus(rawStatus, fallback);
  const durationMs = parseDurationMs(details?.durationMs ?? details?.duration ?? (msg as Record<string, unknown>).durationMs);

  const outputText = (details && typeof details.aggregated === 'string')
    ? details.aggregated
    : extractTextFromContent(msg.content);
  const summary = summarizeToolOutput(outputText) ?? summarizeToolOutput(String(details?.error ?? msg.error ?? ''));

  // Fall back through name → id so the UI always has something to display.
  const name = toolName || toolCallId || 'tool';
  const id = toolCallId || name;

  return {
    id,
    toolCallId,
    name,
    status,
    durationMs,
    summary,
    updatedAt: Date.now(),
  };
}
|
|
|
|
function mergeToolStatus(existing: ToolStatus['status'], incoming: ToolStatus['status']): ToolStatus['status'] {
|
|
const order: Record<ToolStatus['status'], number> = { running: 0, completed: 1, error: 2 };
|
|
return order[incoming] >= order[existing] ? incoming : existing;
|
|
}
|
|
|
|
function upsertToolStatuses(current: ToolStatus[], updates: ToolStatus[]): ToolStatus[] {
|
|
if (updates.length === 0) return current;
|
|
const next = [...current];
|
|
for (const update of updates) {
|
|
const key = update.toolCallId || update.id || update.name;
|
|
if (!key) continue;
|
|
const index = next.findIndex((tool) => (tool.toolCallId || tool.id || tool.name) === key);
|
|
if (index === -1) {
|
|
next.push(update);
|
|
continue;
|
|
}
|
|
const existing = next[index];
|
|
next[index] = {
|
|
...existing,
|
|
...update,
|
|
name: update.name || existing.name,
|
|
status: mergeToolStatus(existing.status, update.status),
|
|
durationMs: update.durationMs ?? existing.durationMs,
|
|
summary: update.summary ?? existing.summary,
|
|
updatedAt: update.updatedAt || existing.updatedAt,
|
|
};
|
|
}
|
|
return next;
|
|
}
|
|
|
|
/**
 * Gather every tool status update derivable from one streaming message:
 * a toolresult-role message (if any), embedded tool_result blocks, and
 * tool_use blocks — in that order.
 */
function collectToolUpdates(message: unknown, eventState: string): ToolStatus[] {
  const updates: ToolStatus[] = [];
  const toolResultUpdate = extractToolResultUpdate(message, eventState);
  if (toolResultUpdate) updates.push(toolResultUpdate);
  updates.push(...extractToolResultBlocks(message, eventState));
  updates.push(...extractToolUseUpdates(message));
  return updates;
}
|
|
|
|
function hasNonToolAssistantContent(message: RawMessage | undefined): boolean {
|
|
if (!message) return false;
|
|
if (typeof message.content === 'string' && message.content.trim()) return true;
|
|
|
|
const content = message.content;
|
|
if (Array.isArray(content)) {
|
|
for (const block of content as ContentBlock[]) {
|
|
if (block.type === 'text' && block.text && block.text.trim()) return true;
|
|
if (block.type === 'thinking' && block.thinking && block.thinking.trim()) return true;
|
|
if (block.type === 'image') return true;
|
|
}
|
|
}
|
|
|
|
const msg = message as unknown as Record<string, unknown>;
|
|
if (typeof msg.text === 'string' && msg.text.trim()) return true;
|
|
|
|
return false;
|
|
}
|
|
|
|
// ── Store ────────────────────────────────────────────────────────

export const useChatStore = create<ChatState>((set, get) => ({
  // Transcript for the current session; tool-result rows are filtered out
  // before reaching here (see loadHistory).
  messages: [],
  loading: false,
  error: null,

  // True from the optimistic user send until the final assistant output
  // (or an error/abort) arrives.
  sending: false,
  // Run id of the in-flight Gateway run; used to filter incoming events.
  activeRunId: null,
  streamingText: '',
  // Cumulative in-progress assistant message from 'delta' events.
  streamingMessage: null,
  // Live tool-call status rows shown while a run is active.
  streamingTools: [],
  // Set after a 'final' event; cleared once history confirms the assistant reply.
  pendingFinal: false,
  lastUserMessageAt: null,
  // Images/files harvested from tool results, attached to the next assistant msg.
  pendingToolImages: [],

  sessions: [],
  currentSessionKey: DEFAULT_SESSION_KEY,

  // UI toggle: whether 'thinking' blocks are rendered.
  showThinking: true,
  thinkingLevel: null,

  // ── Load sessions via sessions.list ──
/**
 * Fetch the session list from the Gateway, deduplicate short vs canonical
 * session keys, and reconcile the currently-selected session. Reloads history
 * when the selection had to change. Failures are logged, not surfaced.
 */
loadSessions: async () => {
  try {
    const result = await window.electron.ipcRenderer.invoke(
      'gateway:rpc',
      'sessions.list',
      { limit: 50 }
    ) as { success: boolean; result?: Record<string, unknown>; error?: string };

    if (result.success && result.result) {
      const data = result.result;
      const rawSessions = Array.isArray(data.sessions) ? data.sessions : [];
      // Coerce raw records into ChatSession, dropping entries without a key.
      const sessions: ChatSession[] = rawSessions.map((s: Record<string, unknown>) => ({
        key: String(s.key || ''),
        label: s.label ? String(s.label) : undefined,
        displayName: s.displayName ? String(s.displayName) : undefined,
        thinkingLevel: s.thinkingLevel ? String(s.thinkingLevel) : undefined,
        model: s.model ? String(s.model) : undefined,
      })).filter((s: ChatSession) => s.key);

      // Map the suffix of each canonical "agent:<x>:<suffix>" key to its full
      // canonical key. First canonical occurrence wins.
      // NOTE(review): assumes canonical keys have >= 3 colon-separated parts —
      // confirm against the Gateway's session-key scheme.
      const canonicalBySuffix = new Map<string, string>();
      for (const session of sessions) {
        if (!session.key.startsWith('agent:')) continue;
        const parts = session.key.split(':');
        if (parts.length < 3) continue;
        const suffix = parts.slice(2).join(':');
        if (suffix && !canonicalBySuffix.has(suffix)) {
          canonicalBySuffix.set(suffix, session.key);
        }
      }

      // Deduplicate: if both short and canonical existed, keep canonical only
      const seen = new Set<string>();
      const dedupedSessions = sessions.filter((s) => {
        if (!s.key.startsWith('agent:') && canonicalBySuffix.has(s.key)) return false;
        if (seen.has(s.key)) return false;
        seen.add(s.key);
        return true;
      });

      // Upgrade the current selection to its canonical form when possible.
      const { currentSessionKey } = get();
      let nextSessionKey = currentSessionKey || DEFAULT_SESSION_KEY;
      if (!nextSessionKey.startsWith('agent:')) {
        const canonicalMatch = canonicalBySuffix.get(nextSessionKey);
        if (canonicalMatch) {
          nextSessionKey = canonicalMatch;
        }
      }
      if (!dedupedSessions.find((s) => s.key === nextSessionKey) && dedupedSessions.length > 0) {
        // Current session not found at all — switch to the first available session
        nextSessionKey = dedupedSessions[0].key;
      }

      // If the selected key still isn't in the list (e.g. empty list), append a
      // synthetic entry so the UI can render the selection.
      const sessionsWithCurrent = !dedupedSessions.find((s) => s.key === nextSessionKey) && nextSessionKey
        ? [
          ...dedupedSessions,
          { key: nextSessionKey, displayName: nextSessionKey },
        ]
        : dedupedSessions;

      set({ sessions: sessionsWithCurrent, currentSessionKey: nextSessionKey });

      // Selection changed (e.g. canonicalized) — refresh the transcript.
      if (currentSessionKey !== nextSessionKey) {
        get().loadHistory();
      }
    }
  } catch (err) {
    console.warn('Failed to load sessions:', err);
  }
},
// ── Switch session ──

/** Select another session: wipe all per-session state, then pull its history. */
switchSession: (key: string) => {
  const freshState = {
    currentSessionKey: key,
    messages: [] as RawMessage[],
    streamingText: '',
    streamingMessage: null,
    streamingTools: [] as ToolStatus[],
    activeRunId: null,
    error: null,
    pendingFinal: false,
    lastUserMessageAt: null,
    pendingToolImages: [] as AttachedFileMeta[],
  };
  set(freshState);
  // Load history for new session
  get().loadHistory();
},
// ── New session ──

/** Mint a unique session key under the canonical prefix, register it, and switch to it. */
newSession: () => {
  // Derive the canonical agent prefix from known sessions; fall back to the
  // module-level default when none are available.
  const prefix = getCanonicalPrefixFromSessions(get().sessions) ?? DEFAULT_CANONICAL_PREFIX;
  const newKey = `${prefix}:session-${Date.now()}`;
  set((s) => {
    const registered: ChatSession[] = [...s.sessions, { key: newKey, displayName: newKey }];
    return {
      currentSessionKey: newKey,
      sessions: registered,
      messages: [],
      streamingText: '',
      streamingMessage: null,
      streamingTools: [],
      activeRunId: null,
      error: null,
      pendingFinal: false,
      lastUserMessageAt: null,
      pendingToolImages: [],
    };
  });
},
// ── Load chat history ──

/**
 * Fetch the transcript for the current session from the Gateway, enrich it
 * with tool-result files and cached image previews, then (if a final reply
 * was pending) resolve the sending state once the assistant's answer is
 * visible in the authoritative history.
 *
 * @param quiet - when true, skip the loading spinner / error reset.
 */
loadHistory: async (quiet = false) => {
  const { currentSessionKey } = get();
  if (!quiet) set({ loading: true, error: null });

  try {
    const result = await window.electron.ipcRenderer.invoke(
      'gateway:rpc',
      'chat.history',
      { sessionKey: currentSessionKey, limit: 200 }
    ) as { success: boolean; result?: Record<string, unknown>; error?: string };

    if (result.success && result.result) {
      const data = result.result;
      const rawMessages = Array.isArray(data.messages) ? data.messages as RawMessage[] : [];
      // Before filtering: attach images/files from tool_result messages to the next assistant message
      const messagesWithToolImages = enrichWithToolResultFiles(rawMessages);
      const filteredMessages = messagesWithToolImages.filter((msg) => !isToolResultRole(msg.role));
      // Restore file attachments for user/assistant messages (from cache + text patterns)
      const enrichedMessages = enrichWithCachedImages(filteredMessages);
      const thinkingLevel = data.thinkingLevel ? String(data.thinkingLevel) : null;
      set({ messages: enrichedMessages, thinkingLevel, loading: false });

      // Async: load missing image previews from disk (updates in background)
      loadMissingPreviews(enrichedMessages).then((updated) => {
        if (updated) {
          // Create new object references so React.memo detects changes.
          // loadMissingPreviews mutates AttachedFileMeta in place, so we
          // must produce fresh message + file references for each affected msg.
          set({
            messages: enrichedMessages.map(msg =>
              msg._attachedFiles
                ? { ...msg, _attachedFiles: msg._attachedFiles.map(f => ({ ...f })) }
                : msg
            ),
          });
        }
      });
      // A 'final' event fired earlier without a visible assistant reply:
      // look for an assistant message with real output, at/after the last user
      // send (timestamps presumably epoch-seconds — see sendMessage), and if
      // found, clear the in-flight run state.
      const { pendingFinal, lastUserMessageAt } = get();
      if (pendingFinal) {
        const recentAssistant = [...filteredMessages].reverse().find((msg) => {
          if (msg.role !== 'assistant') return false;
          if (!hasNonToolAssistantContent(msg)) return false;
          if (lastUserMessageAt && msg.timestamp && msg.timestamp < lastUserMessageAt) return false;
          return true;
        });
        if (recentAssistant) {
          set({ sending: false, activeRunId: null, pendingFinal: false });
        }
      }
    } else {
      set({ messages: [], loading: false });
    }
  } catch (err) {
    console.warn('Failed to load chat history:', err);
    set({ messages: [], loading: false });
  }
},
// ── Send message ──

/**
 * Send a user message (optionally with staged file attachments) to the
 * Gateway. Appends the message optimistically, caches attachment previews
 * before the RPC to beat a history-reload race, and routes through
 * 'chat:sendWithMedia' when files are attached (main process inlines them
 * from disk) or the lightweight 'chat.send' RPC otherwise.
 */
sendMessage: async (text: string, attachments?: Array<{ fileName: string; mimeType: string; fileSize: number; stagedPath: string; preview: string | null }>) => {
  const trimmed = text.trim();
  // Nothing to send: no text and no attachments.
  if (!trimmed && (!attachments || attachments.length === 0)) return;

  const { currentSessionKey } = get();

  // Add user message optimistically (with local file metadata for UI display)
  const userMsg: RawMessage = {
    role: 'user',
    content: trimmed || (attachments?.length ? '(file attached)' : ''),
    // Epoch seconds, matching the Gateway's history timestamps.
    timestamp: Date.now() / 1000,
    id: crypto.randomUUID(),
    _attachedFiles: attachments?.map(a => ({
      fileName: a.fileName,
      mimeType: a.mimeType,
      fileSize: a.fileSize,
      preview: a.preview,
      filePath: a.stagedPath,
    })),
  };
  // Reset all streaming state for the new run.
  set((s) => ({
    messages: [...s.messages, userMsg],
    sending: true,
    error: null,
    streamingText: '',
    streamingMessage: null,
    streamingTools: [],
    pendingFinal: false,
    lastUserMessageAt: userMsg.timestamp ?? null,
  }));

  try {
    const idempotencyKey = crypto.randomUUID();
    const hasMedia = attachments && attachments.length > 0;
    console.log(`[sendMessage] hasMedia=${hasMedia}, attachmentCount=${attachments?.length ?? 0}`);
    if (hasMedia) {
      console.log('[sendMessage] Media paths:', attachments!.map(a => a.stagedPath));
    }

    // Cache image attachments BEFORE the IPC call to avoid race condition:
    // history may reload (via Gateway event) before the RPC returns.
    // Keyed by staged file path which appears in [media attached: <path> ...].
    if (hasMedia && attachments) {
      for (const a of attachments) {
        _imageCache.set(a.stagedPath, {
          fileName: a.fileName,
          mimeType: a.mimeType,
          fileSize: a.fileSize,
          preview: a.preview,
        });
      }
      saveImageCache(_imageCache);
    }

    let result: { success: boolean; result?: { runId?: string }; error?: string };

    if (hasMedia) {
      // Use dedicated chat:sendWithMedia handler — main process reads staged files
      // from disk and builds base64 attachments, avoiding large IPC transfers
      result = await window.electron.ipcRenderer.invoke(
        'chat:sendWithMedia',
        {
          sessionKey: currentSessionKey,
          message: trimmed || 'Process the attached file(s).',
          deliver: false,
          idempotencyKey,
          media: attachments.map((a) => ({
            filePath: a.stagedPath,
            mimeType: a.mimeType,
            fileName: a.fileName,
          })),
        },
      ) as { success: boolean; result?: { runId?: string }; error?: string };
    } else {
      // No media — use standard lightweight RPC
      result = await window.electron.ipcRenderer.invoke(
        'gateway:rpc',
        'chat.send',
        {
          sessionKey: currentSessionKey,
          message: trimmed,
          deliver: false,
          idempotencyKey,
        },
      ) as { success: boolean; result?: { runId?: string }; error?: string };
    }

    console.log(`[sendMessage] RPC result: success=${result.success}, error=${result.error || 'none'}, runId=${result.result?.runId || 'none'}`);

    if (!result.success) {
      set({ error: result.error || 'Failed to send message', sending: false });
    } else if (result.result?.runId) {
      // Track the run so handleChatEvent can filter events to this run.
      set({ activeRunId: result.result.runId });
    } else {
      // No runId from gateway; keep sending state and wait for events.
    }
  } catch (err) {
    set({ error: String(err), sending: false });
  }
},
// ── Abort active run ──

/**
 * Clear local streaming/sending state immediately, then ask the Gateway to
 * abort the in-flight run for the current session. RPC failures surface via
 * the error field.
 */
abortRun: async () => {
  const { currentSessionKey } = get();
  // Reset the UI optimistically — don't wait for the Gateway round-trip.
  set({
    sending: false,
    streamingText: '',
    streamingMessage: null,
    pendingFinal: false,
    lastUserMessageAt: null,
    pendingToolImages: [],
  });
  set({ streamingTools: [] });

  try {
    await window.electron.ipcRenderer.invoke('gateway:rpc', 'chat.abort', { sessionKey: currentSessionKey });
  } catch (err) {
    set({ error: String(err) });
  }
},
// ── Handle incoming chat events from Gateway ──

/**
 * Dispatch a Gateway chat event for the active run. States handled:
 * 'delta' (cumulative streaming update), 'final' (message complete —
 * tool-result finals snapshot the streaming turn; assistant finals are
 * appended and may end the run), 'error', 'aborted', and a defensive
 * fallback for events missing a state field.
 */
handleChatEvent: (event: Record<string, unknown>) => {
  const runId = String(event.runId || '');
  const eventState = String(event.state || '');
  const { activeRunId } = get();

  // Only process events for the active run (or if no active run set)
  if (activeRunId && runId && runId !== activeRunId) return;

  // Defensive: if state is missing but we have a message, try to infer state.
  // This handles the case where the Gateway sends events without a state wrapper
  // (e.g., protocol events where payload is the raw message).
  let resolvedState = eventState;
  if (!resolvedState && event.message && typeof event.message === 'object') {
    const msg = event.message as Record<string, unknown>;
    const stopReason = msg.stopReason ?? msg.stop_reason;
    if (stopReason) {
      // Message has a stopReason → it's a final message
      resolvedState = 'final';
    } else if (msg.role || msg.content) {
      // Message has role/content but no stopReason → treat as delta (streaming)
      resolvedState = 'delta';
    }
  }

  switch (resolvedState) {
    case 'delta': {
      // Streaming update - store the cumulative message
      const updates = collectToolUpdates(event.message, resolvedState);
      set((s) => ({
        // Tool-result deltas never replace the streaming assistant message.
        streamingMessage: (() => {
          if (event.message && typeof event.message === 'object') {
            const msgRole = (event.message as RawMessage).role;
            if (isToolResultRole(msgRole)) return s.streamingMessage;
          }
          return event.message ?? s.streamingMessage;
        })(),
        streamingTools: updates.length > 0 ? upsertToolStatuses(s.streamingTools, updates) : s.streamingTools,
      }));
      break;
    }
    case 'final': {
      // Message complete - add to history and clear streaming
      const finalMsg = event.message as RawMessage | undefined;
      if (finalMsg) {
        const updates = collectToolUpdates(finalMsg, resolvedState);
        if (isToolResultRole(finalMsg.role)) {
          // Resolve file path from the streaming assistant message's matching tool call
          const currentStreamForPath = get().streamingMessage as RawMessage | null;
          const matchedPath = (currentStreamForPath && finalMsg.toolCallId)
            ? getToolCallFilePath(currentStreamForPath, finalMsg.toolCallId)
            : undefined;

          // Mirror enrichWithToolResultFiles: collect images + file refs for next assistant msg
          const toolFiles: AttachedFileMeta[] = [
            ...extractImagesAsAttachedFiles(finalMsg.content),
          ];
          // Backfill path/name on images that lack one, using the tool call's path.
          if (matchedPath) {
            for (const f of toolFiles) {
              if (!f.filePath) {
                f.filePath = matchedPath;
                f.fileName = matchedPath.split(/[\\/]/).pop() || 'image';
              }
            }
          }
          // Also harvest file references mentioned in the tool result's text,
          // skipping raw paths already covered by [media attached: ...] refs.
          const text = getMessageText(finalMsg.content);
          if (text) {
            const mediaRefs = extractMediaRefs(text);
            const mediaRefPaths = new Set(mediaRefs.map(r => r.filePath));
            for (const ref of mediaRefs) toolFiles.push(makeAttachedFile(ref));
            for (const ref of extractRawFilePaths(text)) {
              if (!mediaRefPaths.has(ref.filePath)) toolFiles.push(makeAttachedFile(ref));
            }
          }
          set((s) => {
            // Snapshot the current streaming assistant message (thinking + tool_use) into
            // messages[] before clearing it. The Gateway does NOT send separate 'final'
            // events for intermediate tool-use turns — it only sends deltas and then the
            // tool result. Without snapshotting here, the intermediate thinking+tool steps
            // would be overwritten by the next turn's deltas and never appear in the UI.
            const currentStream = s.streamingMessage as RawMessage | null;
            const snapshotMsgs: RawMessage[] = [];
            if (currentStream) {
              const streamRole = currentStream.role;
              if (streamRole === 'assistant' || streamRole === undefined) {
                // Use message's own id if available, otherwise derive a stable one from runId
                const snapId = currentStream.id
                  || `${runId || 'run'}-turn-${s.messages.length}`;
                if (!s.messages.some(m => m.id === snapId)) {
                  snapshotMsgs.push({
                    ...(currentStream as RawMessage),
                    role: 'assistant',
                    id: snapId,
                  });
                }
              }
            }
            return {
              messages: snapshotMsgs.length > 0 ? [...s.messages, ...snapshotMsgs] : s.messages,
              streamingText: '',
              streamingMessage: null,
              pendingFinal: true,
              pendingToolImages: toolFiles.length > 0
                ? [...s.pendingToolImages, ...toolFiles]
                : s.pendingToolImages,
              streamingTools: updates.length > 0 ? upsertToolStatuses(s.streamingTools, updates) : s.streamingTools,
            };
          });
          break;
        }
        // Assistant (or role-less) final message.
        const toolOnly = isToolOnlyMessage(finalMsg);
        const hasOutput = hasNonToolAssistantContent(finalMsg);
        const msgId = finalMsg.id || (toolOnly ? `run-${runId}-tool-${Date.now()}` : `run-${runId}`);
        set((s) => {
          const nextTools = updates.length > 0 ? upsertToolStatuses(s.streamingTools, updates) : s.streamingTools;
          // Visible output ends the run → clear the tool status rows.
          const streamingTools = hasOutput ? [] : nextTools;

          // Attach any images collected from preceding tool results
          const pendingImgs = s.pendingToolImages;
          const msgWithImages: RawMessage = pendingImgs.length > 0
            ? {
              ...finalMsg,
              role: (finalMsg.role || 'assistant') as RawMessage['role'],
              id: msgId,
              _attachedFiles: [...(finalMsg._attachedFiles || []), ...pendingImgs],
            }
            : { ...finalMsg, role: (finalMsg.role || 'assistant') as RawMessage['role'], id: msgId };
          const clearPendingImages = { pendingToolImages: [] as AttachedFileMeta[] };

          // Check if message already exists (prevent duplicates)
          const alreadyExists = s.messages.some(m => m.id === msgId);
          if (alreadyExists) {
            // Duplicate: update run/streaming state only, don't re-append.
            return toolOnly ? {
              streamingText: '',
              streamingMessage: null,
              pendingFinal: true,
              streamingTools,
              ...clearPendingImages,
            } : {
              streamingText: '',
              streamingMessage: null,
              sending: hasOutput ? false : s.sending,
              activeRunId: hasOutput ? null : s.activeRunId,
              pendingFinal: hasOutput ? false : true,
              streamingTools,
              ...clearPendingImages,
            };
          }
          return toolOnly ? {
            messages: [...s.messages, msgWithImages],
            streamingText: '',
            streamingMessage: null,
            pendingFinal: true,
            streamingTools,
            ...clearPendingImages,
          } : {
            messages: [...s.messages, msgWithImages],
            streamingText: '',
            streamingMessage: null,
            sending: hasOutput ? false : s.sending,
            activeRunId: hasOutput ? null : s.activeRunId,
            pendingFinal: hasOutput ? false : true,
            streamingTools,
            ...clearPendingImages,
          };
        });
        // After the final response, quietly reload history to surface all intermediate
        // tool-use turns (thinking + tool blocks) from the Gateway's authoritative record.
        if (hasOutput && !toolOnly) {
          void get().loadHistory(true);
        }
      } else {
        // No message in final event - reload history to get complete data
        set({ streamingText: '', streamingMessage: null, pendingFinal: true });
        get().loadHistory();
      }
      break;
    }
    case 'error': {
      // Run failed: surface the error and fully reset streaming state.
      const errorMsg = String(event.errorMessage || 'An error occurred');
      set({
        error: errorMsg,
        sending: false,
        activeRunId: null,
        streamingText: '',
        streamingMessage: null,
        streamingTools: [],
        pendingFinal: false,
        lastUserMessageAt: null,
        pendingToolImages: [],
      });
      break;
    }
    case 'aborted': {
      // Gateway confirmed the abort: reset all run state.
      set({
        sending: false,
        activeRunId: null,
        streamingText: '',
        streamingMessage: null,
        streamingTools: [],
        pendingFinal: false,
        lastUserMessageAt: null,
        pendingToolImages: [],
      });
      break;
    }
    default: {
      // Unknown or empty state — if we're currently sending and receive an event
      // with a message, attempt to process it as streaming data. This handles
      // edge cases where the Gateway sends events without a state field.
      const { sending } = get();
      if (sending && event.message && typeof event.message === 'object') {
        console.warn(`[handleChatEvent] Unknown event state "${resolvedState}", treating message as streaming delta. Event keys:`, Object.keys(event));
        const updates = collectToolUpdates(event.message, 'delta');
        set((s) => ({
          streamingMessage: event.message ?? s.streamingMessage,
          streamingTools: updates.length > 0 ? upsertToolStatuses(s.streamingTools, updates) : s.streamingTools,
        }));
      }
      break;
    }
  }
},
// ── Toggle thinking visibility ──

/** Flip whether 'thinking' blocks are rendered in the transcript. */
toggleThinking: () => {
  set((s) => ({ showThinking: !s.showThinking }));
},
// ── Refresh: reload history + sessions ──

/** Re-fetch the chat history and the session list concurrently. */
refresh: async () => {
  const store = get();
  await Promise.all([store.loadHistory(), store.loadSessions()]);
},
// Dismiss the currently-surfaced error without touching other state.
clearError: () => set({ error: null }),
}));