v0.5.0: Binary-Free Mode - No OpenCode binary required

✨ Major Features:
- Native session management without OpenCode binary
- Provider routing: OpenCode Zen (free), Qwen OAuth, Z.AI
- Streaming chat with tool execution loop
- Mode detection API (/api/meta/mode); see the sketch after this list
- MCP integration fix (resolved infinite loading)
- NomadArch Native option in UI with comparison info
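For reference, a minimal sketch of how a client might query the new mode endpoint. The ModeInfo shape here is an assumption for illustration; the shipped client lives in lite-mode.ts:

// Hypothetical response shape - the real fields are defined by /api/meta/mode.
interface ModeInfo {
  mode: "lite" | "full" // assumption: "lite" = Binary-Free Mode, "full" = OpenCode binary present
}

export async function fetchMode(baseUrl = ""): Promise<ModeInfo> {
  const res = await fetch(`${baseUrl}/api/meta/mode`)
  if (!res.ok) throw new Error(`Mode check failed: ${res.status}`)
  return (await res.json()) as ModeInfo
}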

🆓 Free Models (No API Key):
- GPT-5 Nano (400K context)
- Grok Code Fast 1 (256K context)
- GLM-4.7 (205K context)
- Doubao Seed Code (256K context)
- Big Pickle (200K context)

📦 New Files:
- session-store.ts: Native session persistence
- native-sessions.ts: REST API for sessions
- lite-mode.ts: UI mode detection client
- native-sessions.ts (UI): SolidJS store (usage sketch after this list)
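A minimal usage sketch of the new UI store, assuming the module resolves as ./stores/native-sessions; the function names and option shapes are taken from the native-sessions.ts file further down in this commit:

import { checkLiteMode, createNativeSession, sendNativeMessage } from "./stores/native-sessions"

async function demoNativeChat(workspaceId: string): Promise<void> {
  // Only drive the native store when the backend reports Binary-Free (Lite) Mode.
  if (!(await checkLiteMode())) return

  const session = await createNativeSession(workspaceId, { title: "Demo session" })

  await sendNativeMessage(workspaceId, session.id, "List the files in this project", {
    provider: "zen", // free OpenCode Zen routing; "qwen" and "zai" are also accepted
    enableTools: true,
    onChunk: (text) => console.log("chunk:", text),
    onDone: () => console.log("stream finished"),
    onError: (err) => console.error("stream error:", err),
  })
}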

🔧 Updated:
- All installers: Optional binary download
- All launchers: Mode detection display
- Binary selector: Added NomadArch Native option
- README: Binary-Free Mode documentation
Gemini AI
2025-12-26 02:08:13 +04:00
parent 8dddf4d0cf
commit 4bd2893864
83 changed files with 10678 additions and 1290 deletions

View File

@@ -29,9 +29,14 @@ function cloneInstanceData(data?: InstanceData | null): InstanceData {
}
}
// Track instance IDs that we are currently saving - ignore SSE echoes
const pendingSaveIds = new Set<string>()
function attachSubscription(instanceId: string) {
if (instanceSubscriptions.has(instanceId)) return
const unsubscribe = storage.onInstanceDataChanged(instanceId, (data) => {
// Skip SSE echo from our own save
if (pendingSaveIds.has(instanceId)) return
setInstanceData(instanceId, data)
})
instanceSubscriptions.set(instanceId, unsubscribe)
@@ -83,12 +88,15 @@ async function updateInstanceConfig(instanceId: string, mutator: (draft: Instanc
const current = instanceDataMap().get(instanceId) ?? DEFAULT_INSTANCE_DATA
const draft = cloneInstanceData(current)
mutator(draft)
setInstanceData(instanceId, draft)
pendingSaveIds.add(instanceId)
try {
await storage.saveInstanceData(instanceId, draft)
} catch (error) {
log.warn("Failed to persist instance data", error)
} finally {
setTimeout(() => pendingSaveIds.delete(instanceId), 1000)
}
setInstanceData(instanceId, draft)
}
function getInstanceConfig(instanceId: string): InstanceData {

View File

@@ -170,13 +170,52 @@ function handleWorkspaceEvent(event: WorkspaceEventPayload) {
}
}
const logBuffer = new Map<string, LogEntry[]>()
let logFlushPending = false
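// Drain the log buffer into the reactive store in a single batch,
// trimming each instance's history to MAX_LOG_ENTRIES.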
function flushLogs() {
if (logBuffer.size === 0) {
logFlushPending = false
return
}
batch(() => {
setInstanceLogs((prev) => {
const next = new Map(prev)
for (const [id, newEntries] of logBuffer) {
const existing = next.get(id) ?? []
// Keep only last MAX_LOG_ENTRIES
const combined = [...existing, ...newEntries]
const updated = combined.slice(-MAX_LOG_ENTRIES)
next.set(id, updated)
}
return next
})
})
logBuffer.clear()
logFlushPending = false
}
function handleWorkspaceLog(entry: WorkspaceLogEntry) {
const logEntry: LogEntry = {
timestamp: new Date(entry.timestamp).getTime(),
level: (entry.level as LogEntry["level"]) ?? "info",
message: entry.message,
}
addLog(entry.workspaceId, logEntry)
// Only buffer if streaming is enabled for this instance, to save memory
if (!isInstanceLogStreaming(entry.workspaceId)) {
return
}
const currentBuffer = logBuffer.get(entry.workspaceId) ?? []
currentBuffer.push(logEntry)
logBuffer.set(entry.workspaceId, currentBuffer)
if (!logFlushPending) {
logFlushPending = true
setTimeout(flushLogs, 100) // Throttle updates to every 100ms
}
}
function ensureLogContainer(id: string) {

View File

@@ -1,4 +1,4 @@
import { batch } from "solid-js"
import { batch, untrack } from "solid-js"
import { createStore, produce, reconcile } from "solid-js/store"
import type { SetStoreFunction } from "solid-js/store"
import { getLogger } from "../../lib/logger"
@@ -43,6 +43,7 @@ function createInitialState(instanceId: string): InstanceMessageState {
usage: {},
scrollState: {},
latestTodos: {},
streamingUpdateCount: 0,
}
}
@@ -214,7 +215,10 @@ export interface InstanceMessageStore {
getMessage: (messageId: string) => MessageRecord | undefined
getLatestTodoSnapshot: (sessionId: string) => LatestTodoSnapshot | undefined
clearSession: (sessionId: string) => void
beginStreamingUpdate: () => void
endStreamingUpdate: () => void
clearInstance: () => void
isStreaming: () => boolean
}
export function createInstanceMessageStore(instanceId: string, hooks?: MessageStoreHooks): InstanceMessageStore {
@@ -271,6 +275,7 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
function bumpSessionRevision(sessionId: string) {
if (!sessionId) return
if (state.streamingUpdateCount > 0) return
setState("sessionRevisions", sessionId, (value = 0) => value + 1)
}
@@ -282,9 +287,9 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
setState("usage", sessionId, (current) => {
const draft = current
? {
...current,
entries: { ...current.entries },
}
...current,
entries: { ...current.entries },
}
: createEmptyUsageState()
updater(draft)
return draft
@@ -464,23 +469,31 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
let nextRecord: MessageRecord | undefined
setState("messages", input.id, (previous) => {
const revision = previous ? previous.revision + (shouldBump ? 1 : 0) : 0
const record: MessageRecord = {
id: input.id,
sessionId: input.sessionId,
role: input.role,
status: input.status,
createdAt: input.createdAt ?? previous?.createdAt ?? now,
updatedAt: input.updatedAt ?? now,
isEphemeral: input.isEphemeral ?? previous?.isEphemeral ?? false,
revision,
partIds: normalizedParts ? normalizedParts.ids : previous?.partIds ?? [],
parts: normalizedParts ? normalizedParts.map : previous?.parts ?? {},
}
nextRecord = record
return record
})
const updateState = () => {
setState("messages", input.id, (previous) => {
const revision = previous ? previous.revision + (shouldBump ? 1 : 0) : 0
const record: MessageRecord = {
id: input.id,
sessionId: input.sessionId,
role: input.role,
status: input.status,
createdAt: input.createdAt ?? previous?.createdAt ?? now,
updatedAt: input.updatedAt ?? now,
isEphemeral: input.isEphemeral ?? previous?.isEphemeral ?? false,
revision,
partIds: normalizedParts ? normalizedParts.ids : previous?.partIds ?? [],
parts: normalizedParts ? normalizedParts.map : previous?.parts ?? {},
}
nextRecord = record
return record
})
}
if (state.streamingUpdateCount > 0) {
untrack(updateState)
} else {
updateState()
}
if (nextRecord) {
maybeUpdateLatestTodoFromRecord(nextRecord)
@@ -512,30 +525,33 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
bufferPendingPart({ messageId: input.messageId, part: input.part, receivedAt: Date.now() })
return
}
const partId = ensurePartId(input.messageId, input.part, message.partIds.length)
const cloned = clonePart(input.part)
setState(
"messages",
input.messageId,
produce((draft: MessageRecord) => {
if (!draft.partIds.includes(partId)) {
draft.partIds = [...draft.partIds, partId]
}
const existing = draft.parts[partId]
const nextRevision = existing ? existing.revision + 1 : 0
draft.parts[partId] = {
id: partId,
data: cloned,
revision: nextRevision,
}
draft.updatedAt = Date.now()
if (input.bumpRevision ?? true) {
draft.revision += 1
}
}),
)
const updateFn = produce((draft: MessageRecord) => {
if (!draft.partIds.includes(partId)) {
draft.partIds = [...draft.partIds, partId]
}
const existing = draft.parts[partId]
const nextRevision = existing ? existing.revision + 1 : 0
draft.parts[partId] = {
id: partId,
data: cloned,
revision: nextRevision,
}
draft.updatedAt = Date.now()
if (input.bumpRevision ?? true) {
draft.revision += 1
}
})
const updateMessage = () => setState("messages", input.messageId, updateFn)
if (state.streamingUpdateCount > 0) {
untrack(updateMessage)
} else {
updateMessage()
}
if (isCompletedTodoPart(cloned)) {
recordLatestTodoSnapshot(message.sessionId, {
@@ -544,7 +560,7 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
timestamp: Date.now(),
})
}
// Any part update can change the rendered height of the message
// list, so we treat it as a session revision for scroll purposes.
bumpSessionRevision(message.sessionId)
@@ -637,8 +653,15 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
if (!messageId) return
messageInfoCache.set(messageId, info)
const nextVersion = (state.messageInfoVersion[messageId] ?? 0) + 1
setState("messageInfoVersion", messageId, nextVersion)
updateUsageWithInfo(info)
const applyInfo = () => {
setState("messageInfoVersion", messageId, nextVersion)
updateUsageWithInfo(info)
}
if (state.streamingUpdateCount > 0) {
untrack(applyInfo)
} else {
applyInfo()
}
}
function getMessageInfo(messageId: string) {
@@ -775,16 +798,16 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
return state.scrollState[key]
}
function clearSession(sessionId: string) {
if (!sessionId) return
function clearSession(sessionId: string) {
if (!sessionId) return
const messageIds = Object.values(state.messages)
.filter((record) => record.sessionId === sessionId)
.map((record) => record.id)
storeLog.info("Clearing session data", { instanceId, sessionId, messageCount: messageIds.length })
clearRecordDisplayCacheForMessages(instanceId, messageIds)
batch(() => {
setState("messages", (prev) => {
const next = { ...prev }
@@ -854,46 +877,60 @@ export function createInstanceMessageStore(instanceId: string, hooks?: MessageSt
})
clearLatestTodoSnapshot(sessionId)
hooks?.onSessionCleared?.(instanceId, sessionId)
}
function clearInstance() {
messageInfoCache.clear()
setState(reconcile(createInitialState(instanceId)))
}
return {
instanceId,
state,
setState,
addOrUpdateSession,
hydrateMessages,
upsertMessage,
applyPartUpdate,
bufferPendingPart,
flushPendingParts,
replaceMessageId,
setMessageInfo,
getMessageInfo,
upsertPermission,
removePermission,
getPermissionState,
setSessionRevert,
getSessionRevert,
rebuildUsage,
getSessionUsage,
setScrollSnapshot,
getScrollSnapshot,
getSessionRevision: getSessionRevisionValue,
getSessionMessageIds: (sessionId: string) => state.sessions[sessionId]?.messageIds ?? [],
getMessage: (messageId: string) => state.messages[messageId],
getLatestTodoSnapshot: (sessionId: string) => state.latestTodos[sessionId],
clearSession,
clearInstance,
}
function clearInstance() {
messageInfoCache.clear()
setState(reconcile(createInitialState(instanceId)))
}
function beginStreamingUpdate() {
setState("streamingUpdateCount", (count) => count + 1)
}
function endStreamingUpdate() {
setState("streamingUpdateCount", (count) => Math.max(0, count - 1))
}
function isStreaming() {
return state.streamingUpdateCount > 0
}
return {
instanceId,
state,
setState,
addOrUpdateSession,
hydrateMessages,
upsertMessage,
applyPartUpdate,
bufferPendingPart,
flushPendingParts,
replaceMessageId,
setMessageInfo,
getMessageInfo,
upsertPermission,
removePermission,
getPermissionState,
setSessionRevert,
getSessionRevert,
rebuildUsage,
getSessionUsage,
setScrollSnapshot,
getScrollSnapshot,
getSessionRevision: getSessionRevisionValue,
getSessionMessageIds: (sessionId: string) => state.sessions[sessionId]?.messageIds ?? [],
getMessage: (messageId: string) => state.messages[messageId],
getLatestTodoSnapshot: (sessionId: string) => state.latestTodos[sessionId],
clearSession,
clearInstance,
beginStreamingUpdate,
endStreamingUpdate,
isStreaming,
}
}

View File

@@ -1,7 +1,7 @@
import type { ClientPart } from "../../types/message"
import type { Permission } from "@opencode-ai/sdk"
export type MessageStatus = "sending" | "sent" | "streaming" | "complete" | "error"
export type MessageStatus = "sending" | "sent" | "streaming" | "complete" | "error" | "interrupted"
export type MessageRole = "user" | "assistant"
export interface NormalizedPartRecord {
@@ -108,6 +108,7 @@ export interface InstanceMessageState {
usage: Record<string, SessionUsageState>
scrollState: Record<string, ScrollSnapshot>
latestTodos: Record<string, LatestTodoSnapshot | undefined>
streamingUpdateCount: number
}
export interface SessionUpsertInput {

View File

@@ -0,0 +1,319 @@
/**
* Native Session Store - UI-side session management for Binary-Free Mode
*
* This store provides a drop-in replacement for OpenCode SDK session operations
* when running in Binary-Free (Lite) Mode.
*/
import { createSignal, createMemo, batch } from "solid-js"
import type { Session } from "../types/session"
import type { Message, Part } from "../types/message"
import { nativeSessionApi, isLiteMode, NativeSession, NativeMessage } from "../lib/lite-mode"
import { getLogger } from "../lib/logger"
const log = getLogger("native-sessions")
// State
const [nativeSessions, setNativeSessions] = createSignal<Map<string, Map<string, Session>>>(new Map())
const [nativeMessages, setNativeMessages] = createSignal<Map<string, Message[]>>(new Map())
const [isLiteModeActive, setIsLiteModeActive] = createSignal<boolean | null>(null)
/**
* Check and cache lite mode status
*/
export async function checkLiteMode(): Promise<boolean> {
if (isLiteModeActive() !== null) {
return isLiteModeActive()!
}
try {
const liteMode = await isLiteMode()
setIsLiteModeActive(liteMode)
log.info(`Running in ${liteMode ? 'Lite' : 'Full'} mode`)
return liteMode
} catch (error) {
log.warn("Failed to check lite mode, defaulting to full mode", error)
setIsLiteModeActive(false)
return false
}
}
/**
* Get the current lite mode status (synchronous, may be null if not checked)
*/
export function getLiteModeStatus(): boolean | null {
return isLiteModeActive()
}
/**
* Force set lite mode (for testing or manual override)
*/
export function forceLiteMode(enabled: boolean): void {
setIsLiteModeActive(enabled)
}
// Convert native session to UI session format
function nativeToUiSession(native: NativeSession): Session {
return {
id: native.id,
title: native.title,
parentId: native.parentId ?? undefined,
createdAt: native.createdAt,
updatedAt: native.updatedAt,
agent: native.agent,
model: native.model ? {
providerId: native.model.providerId,
modelId: native.model.modelId,
} : undefined,
}
}
// Convert native message to UI message format
function nativeToUiMessage(native: NativeMessage): Message {
const parts: Part[] = []
if (native.content) {
parts.push({
type: "text",
text: native.content,
})
}
return {
id: native.id,
sessionId: native.sessionId,
role: native.role,
createdAt: native.createdAt,
parts,
}
}
/**
* Fetch sessions from native API
*/
export async function fetchNativeSessions(workspaceId: string): Promise<Session[]> {
try {
const sessions = await nativeSessionApi.listSessions(workspaceId)
const uiSessions = sessions.map(nativeToUiSession)
// Update state
setNativeSessions(prev => {
const next = new Map(prev)
const wsMap = new Map<string, Session>()
for (const s of uiSessions) {
wsMap.set(s.id, s)
}
next.set(workspaceId, wsMap)
return next
})
return uiSessions
} catch (error) {
log.error("Failed to fetch native sessions", error)
return []
}
}
/**
* Create a new native session
*/
export async function createNativeSession(
workspaceId: string,
options?: {
title?: string
parentId?: string
model?: { providerId: string; modelId: string }
agent?: string
}
): Promise<Session> {
const native = await nativeSessionApi.createSession(workspaceId, options)
const session = nativeToUiSession(native)
// Update state
setNativeSessions(prev => {
const next = new Map(prev)
const wsMap = new Map(next.get(workspaceId) ?? new Map())
wsMap.set(session.id, session)
next.set(workspaceId, wsMap)
return next
})
return session
}
/**
* Delete a native session
*/
export async function deleteNativeSession(workspaceId: string, sessionId: string): Promise<void> {
await nativeSessionApi.deleteSession(workspaceId, sessionId)
// Update state
setNativeSessions(prev => {
const next = new Map(prev)
const wsMap = new Map(next.get(workspaceId) ?? new Map())
wsMap.delete(sessionId)
next.set(workspaceId, wsMap)
return next
})
// Clear messages
setNativeMessages(prev => {
const next = new Map(prev)
next.delete(`${workspaceId}:${sessionId}`)
return next
})
}
/**
* Get messages for a native session
*/
export async function fetchNativeMessages(workspaceId: string, sessionId: string): Promise<Message[]> {
try {
const messages = await nativeSessionApi.getMessages(workspaceId, sessionId)
const uiMessages = messages.map(nativeToUiMessage)
// Update state
const key = `${workspaceId}:${sessionId}`
setNativeMessages(prev => {
const next = new Map(prev)
next.set(key, uiMessages)
return next
})
return uiMessages
} catch (error) {
log.error("Failed to fetch native messages", error)
return []
}
}
/**
* Get cached native sessions for a workspace
*/
export function getNativeSessions(workspaceId: string): Session[] {
const wsMap = nativeSessions().get(workspaceId)
return wsMap ? Array.from(wsMap.values()) : []
}
/**
* Get cached native messages for a session
*/
export function getNativeMessages(workspaceId: string, sessionId: string): Message[] {
const key = `${workspaceId}:${sessionId}`
return nativeMessages().get(key) ?? []
}
/**
* Send a message to a native session with streaming
*/
export async function sendNativeMessage(
workspaceId: string,
sessionId: string,
content: string,
options?: {
provider?: "qwen" | "zai" | "zen"
accessToken?: string
resourceUrl?: string
enableTools?: boolean
onChunk?: (content: string) => void
onDone?: () => void
onError?: (error: string) => void
}
): Promise<void> {
const { provider = "zen", accessToken, resourceUrl, enableTools = true, onChunk, onDone, onError } = options ?? {}
try {
// Add user message to local state immediately
const userMessage: Message = {
id: `temp-${Date.now()}`,
sessionId,
role: "user",
createdAt: Date.now(),
parts: [{ type: "text", text: content }],
}
const key = `${workspaceId}:${sessionId}`
setNativeMessages(prev => {
const next = new Map(prev)
const messages = [...(next.get(key) ?? []), userMessage]
next.set(key, messages)
return next
})
// Start streaming
let fullContent = ""
for await (const chunk of nativeSessionApi.streamPrompt(workspaceId, sessionId, content, {
provider,
accessToken,
resourceUrl,
enableTools,
})) {
if (chunk.type === "content" && chunk.data) {
fullContent += chunk.data
onChunk?.(chunk.data)
} else if (chunk.type === "error") {
onError?.(chunk.data ?? "Unknown error")
return
} else if (chunk.type === "done") {
break
}
}
// Add assistant message to local state
const assistantMessage: Message = {
id: `msg-${Date.now()}`,
sessionId,
role: "assistant",
createdAt: Date.now(),
parts: [{ type: "text", text: fullContent }],
}
setNativeMessages(prev => {
const next = new Map(prev)
const messages = [...(next.get(key) ?? []), assistantMessage]
next.set(key, messages)
return next
})
onDone?.()
// Refresh messages from server to get the real IDs
await fetchNativeMessages(workspaceId, sessionId)
} catch (error) {
log.error("Failed to send native message", error)
onError?.(String(error))
}
}
/**
* Update a native session
*/
export async function updateNativeSession(
workspaceId: string,
sessionId: string,
updates: { title?: string }
): Promise<Session | null> {
const result = await nativeSessionApi.updateSession(workspaceId, sessionId, updates)
if (!result) return null
const session = nativeToUiSession(result)
// Update state
setNativeSessions(prev => {
const next = new Map(prev)
const wsMap = new Map(next.get(workspaceId) ?? new Map())
wsMap.set(session.id, session)
next.set(workspaceId, wsMap)
return next
})
return session
}
export {
nativeSessions,
nativeMessages,
isLiteModeActive,
}

View File

@@ -1,4 +1,4 @@
import { createEffect, createSignal } from "solid-js"
import { createEffect, createSignal, createRoot } from "solid-js"
import type { LatestReleaseInfo, WorkspaceEventPayload } from "../../../server/src/api-types"
import { getServerMeta } from "../lib/server-meta"
import { serverEvents } from "../lib/server-events"
@@ -29,30 +29,33 @@ function ensureVisibilityEffect() {
}
visibilityEffectInitialized = true
createEffect(() => {
const release = availableRelease()
const shouldShow = Boolean(release) && (!hasInstances() || showFolderSelection())
// Use createRoot to properly scope this effect
createRoot(() => {
createEffect(() => {
const release = availableRelease()
const shouldShow = Boolean(release) && (!hasInstances() || showFolderSelection())
if (!shouldShow || !release) {
dismissActiveToast()
return
}
if (!shouldShow || !release) {
dismissActiveToast()
return
}
if (!activeToast || activeToastVersion !== release.version) {
dismissActiveToast()
activeToast = showToastNotification({
title: `NomadArch ${release.version}`,
message: release.channel === "dev" ? "Dev release build available." : "New stable build on GitHub.",
variant: "info",
duration: Number.POSITIVE_INFINITY,
position: "bottom-right",
action: {
label: "View release",
href: release.url,
},
})
activeToastVersion = release.version
}
if (!activeToast || activeToastVersion !== release.version) {
dismissActiveToast()
activeToast = showToastNotification({
title: `NomadArch ${release.version}`,
message: release.channel === "dev" ? "Dev release build available." : "New stable build on GitHub.",
variant: "info",
duration: Number.POSITIVE_INFINITY,
position: "bottom-right",
action: {
label: "View release",
href: release.url,
},
})
activeToastVersion = release.version
}
})
})
}

View File

@@ -1,5 +1,7 @@
import { untrack, batch } from "solid-js"
import { addDebugLog } from "../components/debug-overlay"
import { resolvePastedPlaceholders } from "../lib/prompt-placeholders"
import { instances } from "./instances"
import { instances, activeInstanceId } from "./instances"
import { addTaskMessage } from "./task-actions"
import { addRecentModelPreference, setAgentModelPreference, getAgentModelPreference } from "./preferences"
@@ -36,7 +38,8 @@ const COMPACTION_ATTEMPT_TTL_MS = 60_000
const COMPACTION_SUMMARY_MAX_CHARS = 4000
const STREAM_TIMEOUT_MS = 120_000
const OPENCODE_ZEN_OFFLINE_STORAGE_KEY = "opencode-zen-offline-models"
const BUILD_PREVIEW_EVENT = "opencode:build-preview"
export const BUILD_PREVIEW_EVENT = "opencode:build-preview"
export const FILE_CHANGE_EVENT = "opencode:workspace-files-changed"
function markOpencodeZenModelOffline(modelId: string): void {
if (typeof window === "undefined" || !modelId) return
@@ -234,6 +237,8 @@ async function checkTokenBudgetBeforeSend(
type ExternalChatMessage = { role: "user" | "assistant" | "system"; content: string }
const MAX_ATTACHMENT_CHARS = 8000
const MAX_CONTEXT_MESSAGES = 100
const MAX_MESSAGES_FOR_YIELD = 50
function shouldForceEnglish(prompt: string): boolean {
const text = prompt.trim()
@@ -270,6 +275,12 @@ function clampText(value: string, maxChars: number): string {
return `${value.slice(0, Math.max(0, maxChars - 3))}...`
}
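// Yield to the event loop every MAX_MESSAGES_FOR_YIELD iterations so large
// context builds do not block the UI thread.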
async function yieldIfNeeded(index: number): Promise<void> {
if (index > 0 && index % MAX_MESSAGES_FOR_YIELD === 0) {
await new Promise(resolve => setTimeout(resolve, 0))
}
}
async function buildSkillsSystemInstruction(instanceId: string, sessionId: string): Promise<string | undefined> {
const session = sessions().get(instanceId)?.get(sessionId)
const selected = session?.skills ?? []
@@ -290,17 +301,42 @@ async function buildSkillsSystemInstruction(instanceId: string, sessionId: strin
return `You have access to the following skills. Follow their instructions when relevant.\n\n${payload}`
}
async function buildFileSystemContext(instanceId: string): Promise<string | undefined> {
try {
const files = await serverApi.listWorkspaceFiles(instanceId)
if (!files || files.length === 0) return undefined
// Sort directories first
const sorted = files.sort((a: any, b: any) => {
const aDir = a.isDirectory || a.type === "directory"
const bDir = b.isDirectory || b.type === "directory"
if (aDir === bDir) return (a.name || "").localeCompare(b.name || "")
return aDir ? -1 : 1
})
const list = sorted.map((f: any) => {
const isDir = f.isDirectory || f.type === "directory"
return isDir ? `${f.name}/` : f.name
}).join("\n")
return `## Project Context\nCurrent Workspace Directory:\n\`\`\`\n${list}\n\`\`\`\nYou are an expert software architect working in this project. Use standard tools to explore further.`
} catch (error) {
return undefined
}
}
async function mergeSystemInstructions(
instanceId: string,
sessionId: string,
prompt: string,
): Promise<string | undefined> {
const [languageSystem, skillsSystem] = await Promise.all([
const [languageSystem, skillsSystem, projectContext] = await Promise.all([
Promise.resolve(buildLanguageSystemInstruction(prompt)),
buildSkillsSystemInstruction(instanceId, sessionId),
buildFileSystemContext(instanceId),
])
const sshInstruction = buildSshPasswordInstruction(prompt)
const sections = [languageSystem, skillsSystem, sshInstruction].filter(Boolean) as string[]
const sections = [projectContext, languageSystem, skillsSystem, sshInstruction].filter(Boolean) as string[]
if (sections.length === 0) return undefined
return sections.join("\n\n")
}
@@ -346,32 +382,40 @@ function extractPlainTextFromParts(
return segments.join("\n").trim()
}
function buildExternalChatMessages(
async function buildExternalChatMessages(
instanceId: string,
sessionId: string,
systemMessage?: string,
): ExternalChatMessage[] {
const store = messageStoreBus.getOrCreate(instanceId)
const messageIds = store.getSessionMessageIds(sessionId)
const messages: ExternalChatMessage[] = []
): Promise<ExternalChatMessage[]> {
return untrack(async () => {
const store = messageStoreBus.getOrCreate(instanceId)
const messageIds = store.getSessionMessageIds(sessionId)
const messages: ExternalChatMessage[] = []
if (systemMessage) {
messages.push({ role: "system", content: systemMessage })
}
if (systemMessage) {
messages.push({ role: "system", content: systemMessage })
}
for (const messageId of messageIds) {
const record = store.getMessage(messageId)
if (!record) continue
const { orderedParts } = buildRecordDisplayData(instanceId, record)
const content = extractPlainTextFromParts(orderedParts as Array<{ type?: string; text?: unknown; filename?: string }>)
if (!content) continue
messages.push({
role: record.role === "assistant" ? "assistant" : "user",
content,
})
}
const limitedMessageIds = messageIds.length > MAX_CONTEXT_MESSAGES
? messageIds.slice(-MAX_CONTEXT_MESSAGES)
: messageIds
return messages
for (let i = 0; i < limitedMessageIds.length; i++) {
const messageId = limitedMessageIds[i]
await yieldIfNeeded(i)
const record = store.getMessage(messageId)
if (!record) continue
const { orderedParts } = buildRecordDisplayData(instanceId, record)
const content = extractPlainTextFromParts(orderedParts as Array<{ type?: string; text?: unknown; filename?: string }>)
if (!content) continue
messages.push({
role: record.role === "assistant" ? "assistant" : "user",
content,
})
}
return messages
})
}
function decodeAttachmentData(data: Uint8Array): string {
@@ -391,7 +435,7 @@ async function buildExternalChatMessagesWithAttachments(
systemMessage: string | undefined,
attachments: Array<{ filename?: string; source?: any; mediaType?: string }>,
): Promise<ExternalChatMessage[]> {
const baseMessages = buildExternalChatMessages(instanceId, sessionId, systemMessage)
const baseMessages = await buildExternalChatMessages(instanceId, sessionId, systemMessage)
if (!attachments || attachments.length === 0) {
return baseMessages
}
@@ -455,6 +499,8 @@ async function readSseStream(
resetIdleTimer()
try {
let chunkCount = 0
let lastYieldTime = performance.now()
while (!shouldStop) {
const { done, value } = await reader.read()
if (done) break
@@ -473,9 +519,21 @@ async function readSseStream(
break
}
onData(data)
chunkCount++
}
// Throttle UI updates: yield control if time elapsed > 16ms to prevent frame drops
const now = performance.now()
if (now - lastYieldTime > 16) {
addDebugLog(`Yielding after ${Math.round(now - lastYieldTime)}ms (chunks: ${chunkCount})`, "info")
lastYieldTime = now
if ('requestIdleCallback' in window) {
await new Promise<void>(resolve => {
requestIdleCallback(() => resolve(), { timeout: 16 })
})
} else {
await new Promise<void>(resolve => setTimeout(resolve, 0))
}
}
// Yield to main thread periodically to prevent UI freeze during rapid streaming
await new Promise<void>(resolve => setTimeout(resolve, 0))
}
if (timedOut) {
throw new Error("Stream timed out")
@@ -499,6 +557,10 @@ async function streamOllamaChat(
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), STREAM_TIMEOUT_MS)
// Get workspace path for tool execution
const instance = instances().get(instanceId)
const workspacePath = instance?.folder || ""
const response = await fetch("/api/ollama/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
@@ -507,6 +569,8 @@ async function streamOllamaChat(
model: modelId,
messages,
stream: true,
workspacePath,
enableTools: true,
}),
})
@@ -516,54 +580,105 @@ async function streamOllamaChat(
}
const store = messageStoreBus.getOrCreate(instanceId)
store.beginStreamingUpdate()
let fullText = ""
let lastUpdateAt = 0
try {
await readSseStream(response, (data) => {
try {
const chunk = JSON.parse(data)
// Check for error response from server
if (chunk?.error) {
throw new Error(chunk.error)
if (chunk?.error) throw new Error(chunk.error)
// Handle tool execution results (special events from backend)
if (chunk?.type === "tool_result") {
const toolResult = `\n\n✅ **Tool Executed:** ${chunk.content}\n\n`
fullText += toolResult
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
// Dispatch file change event to refresh sidebar
if (typeof window !== "undefined") {
console.log(`[EVENT] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
window.dispatchEvent(new CustomEvent(FILE_CHANGE_EVENT, { detail: { instanceId } }))
}
// Auto-trigger preview for HTML file writes
const content = chunk.content || ""
if (content.includes("Successfully wrote") &&
(content.includes(".html") || content.includes("index.") || content.includes(".htm"))) {
if (typeof window !== "undefined") {
const htmlMatch = content.match(/to\s+([^\s]+\.html?)/)
if (htmlMatch) {
const relativePath = htmlMatch[1]
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:3000"
const apiOrigin = origin.replace(":3000", ":9898")
const previewUrl = `${apiOrigin}/api/workspaces/${instanceId}/serve/${relativePath}`
console.log(`[EVENT] Auto-preview triggered for ${previewUrl}`);
window.dispatchEvent(new CustomEvent(BUILD_PREVIEW_EVENT, {
detail: { url: previewUrl, instanceId }
}))
}
}
}
return
}
const delta = chunk?.message?.content
if (typeof delta !== "string" || delta.length === 0) return
fullText += delta
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
const now = Date.now()
if (now - lastUpdateAt > 150) { // Limit to ~7 updates per second
lastUpdateAt = now
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
}
} catch (e) {
if (e instanceof Error) throw e
// Ignore malformed chunks
}
})
// Always apply final text update
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
} finally {
clearTimeout(timeoutId)
store.endStreamingUpdate()
}
store.upsertMessage({
id: assistantMessageId,
sessionId,
role: "assistant",
status: "complete",
updatedAt: Date.now(),
isEphemeral: false,
})
store.setMessageInfo(assistantMessageId, {
id: assistantMessageId,
role: "assistant",
providerID: providerId,
modelID: modelId,
time: { created: store.getMessageInfo(assistantMessageId)?.time?.created ?? Date.now(), completed: Date.now() },
} as any)
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sent",
updatedAt: Date.now(),
isEphemeral: false,
batch(() => {
store.upsertMessage({
id: assistantMessageId,
sessionId,
role: "assistant",
status: "complete",
updatedAt: Date.now(),
isEphemeral: false,
})
store.setMessageInfo(assistantMessageId, {
id: assistantMessageId,
role: "assistant",
providerID: providerId,
modelID: modelId,
time: { created: store.getMessageInfo(assistantMessageId)?.time?.created ?? Date.now(), completed: Date.now() },
} as any)
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sent",
updatedAt: Date.now(),
isEphemeral: false,
})
})
}
@@ -582,6 +697,10 @@ async function streamQwenChat(
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), STREAM_TIMEOUT_MS)
// Get workspace path for tool execution
const instance = instances().get(instanceId)
const workspacePath = instance?.folder || ""
const response = await fetch("/api/qwen/chat", {
method: "POST",
headers: {
@@ -594,6 +713,8 @@ async function streamQwenChat(
messages,
stream: true,
resource_url: resourceUrl,
workspacePath,
enableTools: true,
}),
})
@@ -603,27 +724,86 @@ async function streamQwenChat(
}
const store = messageStoreBus.getOrCreate(instanceId)
store.beginStreamingUpdate()
let fullText = ""
let lastUpdateAt = 0
try {
await readSseStream(response, (data) => {
try {
const chunk = JSON.parse(data)
// Handle tool execution results
if (chunk?.type === "tool_result") {
const toolResult = `\n\n✅ **Tool Executed:** ${chunk.content}\n\n`
fullText += toolResult
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
// Dispatch file change event to refresh sidebar
if (typeof window !== "undefined") {
console.log(`[Qwen] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
console.log(`[EVENT] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
window.dispatchEvent(new CustomEvent(FILE_CHANGE_EVENT, { detail: { instanceId } }));
// Double-tap refresh after 1s to catch FS latency
setTimeout(() => {
window.dispatchEvent(new CustomEvent(FILE_CHANGE_EVENT, { detail: { instanceId } }));
}, 1000);
}
// Auto-trigger preview for HTML file writes
const content = chunk.content || ""
if (content.includes("Successfully wrote") &&
(content.includes(".html") || content.includes("index.") || content.includes(".htm"))) {
if (typeof window !== "undefined") {
const htmlMatch = content.match(/to\s+([^\s]+\.html?)/)
if (htmlMatch) {
const relativePath = htmlMatch[1]
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:3000"
const apiOrigin = origin.replace(":3000", ":9898")
const previewUrl = `${apiOrigin}/api/workspaces/${instanceId}/serve/${relativePath}`
console.log(`[Qwen] Auto-preview triggered for ${relativePath}`);
console.log(`[EVENT] Auto-preview triggered for ${previewUrl}`);
window.dispatchEvent(new CustomEvent(BUILD_PREVIEW_EVENT, {
detail: { url: previewUrl, instanceId }
}))
}
}
}
return
}
const delta =
chunk?.choices?.[0]?.delta?.content ??
chunk?.choices?.[0]?.message?.content
if (typeof delta !== "string" || delta.length === 0) return
fullText += delta
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
const now = Date.now()
if (now - lastUpdateAt > 40) { // Limit to ~25 updates per second
lastUpdateAt = now
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
}
} catch {
// Ignore malformed chunks
}
})
// Always apply final text update
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
} finally {
clearTimeout(timeoutId)
store.endStreamingUpdate()
}
store.upsertMessage({
@@ -664,6 +844,10 @@ async function streamOpenCodeZenChat(
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), STREAM_TIMEOUT_MS)
// Get workspace path for tool execution
const instance = instances().get(instanceId)
const workspacePath = instance?.folder || ""
const response = await fetch("/api/opencode-zen/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
@@ -672,6 +856,8 @@ async function streamOpenCodeZenChat(
model: modelId,
messages,
stream: true,
workspacePath,
enableTools: true,
}),
})
@@ -681,7 +867,9 @@ async function streamOpenCodeZenChat(
}
const store = messageStoreBus.getOrCreate(instanceId)
store.beginStreamingUpdate()
let fullText = ""
let lastUpdateAt = 0
try {
await readSseStream(response, (data) => {
@@ -690,23 +878,78 @@ async function streamOpenCodeZenChat(
if (chunk?.error) {
throw new Error(typeof chunk.error === "string" ? chunk.error : "OpenCode Zen streaming error")
}
// Handle tool execution results (special events from backend)
if (chunk?.type === "tool_result") {
const toolResult = `\n\n✅ **Tool Executed:** ${chunk.content}\n\n`
fullText += toolResult
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
// Dispatch file change event to refresh sidebar
if (typeof window !== "undefined") {
console.log(`[Ollama] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
console.log(`[EVENT] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
window.dispatchEvent(new CustomEvent(FILE_CHANGE_EVENT, { detail: { instanceId } }))
}
// Auto-trigger preview for HTML file writes
const content = chunk.content || ""
if (content.includes("Successfully wrote") &&
(content.includes(".html") || content.includes("index.") || content.includes(".htm"))) {
if (typeof window !== "undefined") {
const htmlMatch = content.match(/to\s+([^\s]+\.html?)/)
if (htmlMatch) {
const relativePath = htmlMatch[1]
// USE PROXY URL instead of file:// to avoid "Not allowed to load local resource"
// The backend (port 9898) serves workspace files via /api/workspaces/:id/serve
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:3000"
const apiOrigin = origin.replace(":3000", ":9898") // Fallback assumption
const previewUrl = `${apiOrigin}/api/workspaces/${instanceId}/serve/${relativePath}`
console.log(`[Ollama] Auto-preview triggered for ${relativePath}`);
console.log(`[EVENT] Auto-preview triggered for ${previewUrl}`);
window.dispatchEvent(new CustomEvent(BUILD_PREVIEW_EVENT, {
detail: { url: previewUrl, instanceId }
}))
}
}
}
return
}
const delta =
chunk?.choices?.[0]?.delta?.content ??
chunk?.choices?.[0]?.message?.content
if (typeof delta !== "string" || delta.length === 0) return
fullText += delta
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
const now = Date.now()
if (now - lastUpdateAt > 40) { // Limit to ~25 updates per second
lastUpdateAt = now
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
}
} catch (error) {
if (error instanceof Error) {
throw error
}
}
})
// Always apply final text update
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
} finally {
clearTimeout(timeoutId)
store.endStreamingUpdate()
}
@@ -748,6 +991,10 @@ async function streamZAIChat(
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), STREAM_TIMEOUT_MS)
// Get workspace path for tool execution
const instance = instances().get(instanceId)
const workspacePath = instance?.folder || ""
const response = await fetch("/api/zai/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
@@ -756,6 +1003,8 @@ async function streamZAIChat(
model: modelId,
messages,
stream: true,
workspacePath,
enableTools: true,
}),
})
@@ -765,32 +1014,81 @@ async function streamZAIChat(
}
const store = messageStoreBus.getOrCreate(instanceId)
store.beginStreamingUpdate()
let fullText = ""
let lastUpdateAt = 0
try {
await readSseStream(response, (data) => {
try {
const chunk = JSON.parse(data)
// Check for error response from server
if (chunk?.error) {
throw new Error(chunk.error)
if (chunk?.error) throw new Error(chunk.error)
// Handle tool execution results (special events from backend)
if (chunk?.type === "tool_result") {
const toolResult = `\n\n✅ **Tool Executed:** ${chunk.content}\n\n`
fullText += toolResult
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
// Dispatch file change event to refresh sidebar
if (typeof window !== "undefined") {
console.log(`[EVENT] Dispatching FILE_CHANGE_EVENT for ${instanceId}`);
window.dispatchEvent(new CustomEvent(FILE_CHANGE_EVENT, { detail: { instanceId } }))
}
// Auto-trigger preview for HTML file writes
const content = chunk.content || ""
if (content.includes("Successfully wrote") &&
(content.includes(".html") || content.includes("index.") || content.includes(".htm"))) {
if (typeof window !== "undefined") {
const htmlMatch = content.match(/to\s+([^\s]+\.html?)/)
if (htmlMatch) {
const relativePath = htmlMatch[1]
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:3000"
const apiOrigin = origin.replace(":3000", ":9898")
const previewUrl = `${apiOrigin}/api/workspaces/${instanceId}/serve/${relativePath}`
console.log(`[EVENT] Auto-preview triggered for ${previewUrl}`);
window.dispatchEvent(new CustomEvent(BUILD_PREVIEW_EVENT, {
detail: { url: previewUrl, instanceId }
}))
}
}
}
return
}
const delta =
chunk?.choices?.[0]?.delta?.content ??
chunk?.choices?.[0]?.message?.content
if (typeof delta !== "string" || delta.length === 0) return
fullText += delta
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
const now = Date.now()
if (now - lastUpdateAt > 40) { // Limit to ~25 updates per second
lastUpdateAt = now
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
}
} catch (e) {
if (e instanceof Error) throw e
// Ignore malformed chunks
}
})
// Always apply final text update
store.applyPartUpdate({
messageId: assistantMessageId,
part: { id: assistantPartId, type: "text", text: fullText } as any,
})
} finally {
clearTimeout(timeoutId)
store.endStreamingUpdate()
}
store.upsertMessage({
@@ -941,15 +1239,17 @@ async function sendMessage(
log.info("sendMessage: upserting optimistic message", { messageId, sessionId, taskId });
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sending",
parts: optimisticParts,
createdAt,
updatedAt: createdAt,
isEphemeral: true,
untrack(() => {
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sending",
parts: optimisticParts,
createdAt,
updatedAt: createdAt,
isEphemeral: true,
})
})
withSession(instanceId, sessionId, () => {
@@ -957,47 +1257,62 @@ async function sendMessage(
})
const providerId = effectiveModel.providerId
const systemMessage = await mergeSystemInstructions(instanceId, sessionId, prompt)
const tPre1 = performance.now()
const systemMessage = await untrack(() => mergeSystemInstructions(instanceId, sessionId, prompt))
const tPre2 = performance.now()
if (tPre2 - tPre1 > 10) {
addDebugLog(`Merge System Instructions: ${Math.round(tPre2 - tPre1)}ms`, "warn")
}
if (providerId === "ollama-cloud" || providerId === "qwen-oauth" || providerId === "opencode-zen" || providerId === "zai") {
const store = messageStoreBus.getOrCreate(instanceId)
const now = Date.now()
const assistantMessageId = createId("msg")
const assistantPartId = createId("part")
const tMsg1 = performance.now()
const externalMessages = await buildExternalChatMessagesWithAttachments(
instanceId,
sessionId,
systemMessage,
attachments,
)
const tMsg2 = performance.now()
if (tMsg2 - tMsg1 > 10) {
addDebugLog(`Build External Messages: ${Math.round(tMsg2 - tMsg1)}ms`, "warn")
}
store.upsertMessage({
id: assistantMessageId,
sessionId,
role: "assistant",
status: "streaming",
parts: [{ id: assistantPartId, type: "text", text: "" } as any],
createdAt: now,
updatedAt: now,
isEphemeral: true,
})
store.setMessageInfo(assistantMessageId, {
id: assistantMessageId,
role: "assistant",
providerID: effectiveModel.providerId,
modelID: effectiveModel.modelId,
time: { created: now, completed: 0 },
} as any)
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sent",
updatedAt: now,
isEphemeral: false,
untrack(() => {
store.upsertMessage({
id: assistantMessageId,
sessionId,
role: "assistant",
status: "streaming",
parts: [{ id: assistantPartId, type: "text", text: "" } as any],
createdAt: now,
updatedAt: now,
isEphemeral: true,
})
store.setMessageInfo(assistantMessageId, {
id: assistantMessageId,
role: "assistant",
providerID: effectiveModel.providerId,
modelID: effectiveModel.modelId,
time: { created: now, completed: 0 },
} as any)
store.upsertMessage({
id: messageId,
sessionId,
role: "user",
status: "sent",
updatedAt: now,
isEphemeral: false,
})
})
try {
if (providerId === "ollama-cloud") {
const tStream1 = performance.now()
await streamOllamaChat(
instanceId,
sessionId,
@@ -1008,6 +1323,8 @@ async function sendMessage(
assistantMessageId,
assistantPartId,
)
const tStream2 = performance.now()
addDebugLog(`Stream Complete: ${Math.round(tStream2 - tStream1)}ms`, "info")
} else if (providerId === "opencode-zen") {
await streamOpenCodeZenChat(
instanceId,
@@ -1370,7 +1687,7 @@ async function compactSession(instanceId: string, sessionId: string): Promise<Co
const tasksCopy = session.tasks.map((task) => ({ ...task }))
withSession(instanceId, compactedSession.id, (nextSession) => {
nextSession.tasks = tasksCopy
nextSession.activeTaskId = undefined
nextSession.activeTaskId = session.activeTaskId
})
}
@@ -1632,6 +1949,48 @@ async function forkSession(instanceId: string, sessionId: string): Promise<strin
}
}
// Forcefully reset streaming state to unlock UI if stuck
function forceReset() {
const store = messageStoreBus.getOrCreate(activeInstanceId() || "")
if (!store) return
// Clear any parts still buffered for in-flight streams
store.setState("pendingParts", {})
// The store has no direct reset for streamingUpdateCount, so drain it by
// calling endStreamingUpdate() until it reaches zero (with a safety cap).
let safety = 0
while (store.state.streamingUpdateCount > 0 && safety < 100) {
store.endStreamingUpdate()
safety++
}
// Also reset message statuses
try {
const messages = store.state.messages;
Object.values(messages).forEach(msg => {
if (msg.status === "streaming" || msg.status === "sending") {
store.upsertMessage({
id: msg.id,
sessionId: msg.sessionId,
role: msg.role,
status: "interrupted",
updatedAt: Date.now(),
isEphemeral: msg.isEphemeral,
})
}
})
} catch (e) {
console.error("Error updating message status during reset", e)
}
addDebugLog("Force Reset Triggered: Cleared streaming state & statuses", "warn")
}
export {
abortSession,
compactSession,
@@ -1644,4 +2003,5 @@ export {
updateSessionAgent,
updateSessionModel,
updateSessionModelForSession,
forceReset,
}

View File

@@ -18,7 +18,7 @@ import type { MessageStatus } from "./message-v2/types"
import { getLogger } from "../lib/logger"
import { showToastNotification, ToastVariant } from "../lib/notifications"
import { instances, addPermissionToQueue, removePermissionFromQueue, sendPermissionResponse } from "./instances"
import { getSoloState, incrementStep, popFromTaskQueue, setActiveTaskId } from "./solo-store"
import { getSoloState, incrementStep, popFromTaskQueue, setActiveTaskId, canPerformAutonomousAction, recordAutonomousAction, resetErrorRecovery, clearContinuationFlag } from "./solo-store"
import { sendMessage, consumeTokenWarningSuppression, consumeCompactionSuppression, updateSessionModel } from "./session-actions"
import { showAlertDialog } from "./alerts"
import { sessions, setSessions, withSession } from "./session-state"
@@ -175,11 +175,21 @@ function handleMessageUpdate(instanceId: string, event: MessageUpdateEvent | Mes
// Auto-correction logic for SOLO
const solo = getSoloState(instanceId)
if (hasError && solo.isAutonomous && solo.currentStep < solo.maxSteps) {
log.info(`[SOLO] Error detected in autonomous mode, prompting for fix: ${messageId}`)
const errorMessage = (info as any).error?.message || "Unknown error"
// Check if we can perform autonomous error recovery (loop prevention)
if (!canPerformAutonomousAction(instanceId, "error_recovery")) {
log.warn("[SOLO] Error recovery blocked by loop prevention", { instanceId, sessionId, errorMessage })
return
}
log.info(`[SOLO] Error detected in autonomous mode, prompting for fix: ${messageId}`)
incrementStep(instanceId)
recordAutonomousAction(instanceId, "error_recovery", errorMessage)
sendMessage(instanceId, sessionId, `The previous step failed with error: ${errorMessage}. Please analyze the error and try a different approach.`, [], solo.activeTaskId || undefined).catch((err) => {
log.error("[SOLO] Failed to send error correction message", err)
resetErrorRecovery(instanceId)
})
}
@@ -338,10 +348,17 @@ function handleSessionIdle(instanceId: string, event: EventSessionIdle): void {
const session = instanceSessions?.get(sessionId)
if (!session) return
// If there's an active task, we might want to prompt the agent to continue or check progress
// If there's an active task, we might want to prompt the agent to continue or check progress
if (!canPerformAutonomousAction(instanceId, "idle_continuation")) {
log.warn("[SOLO] Idle continuation blocked by loop prevention", { instanceId, sessionId })
clearContinuationFlag(instanceId)
return
}
if (solo.activeTaskId) {
log.info(`[SOLO] Session idle in autonomous mode, prompting continuation for task: ${solo.activeTaskId}`)
incrementStep(instanceId)
recordAutonomousAction(instanceId, "idle_continuation")
sendMessage(instanceId, sessionId, "Continue", [], solo.activeTaskId).catch((err) => {
log.error("[SOLO] Failed to send continuation message", err)
})
@@ -363,6 +380,7 @@ function handleSessionIdle(instanceId: string, event: EventSessionIdle): void {
}
setActiveTaskId(instanceId, nextTaskId)
recordAutonomousAction(instanceId, "idle_continuation")
sendMessage(instanceId, sessionId, taskTitle, [], nextTaskId).catch((err) => {
log.error("[SOLO] Failed to start next task", err)
})
@@ -435,10 +453,19 @@ function handleSessionError(instanceId: string, event: EventSessionError): void
const sessionId = (event.properties as any)?.sessionID
if (solo.isAutonomous && sessionId && solo.currentStep < solo.maxSteps) {
const errorMessage = `I encountered an error: "${message}". Please analyze the cause and provide a fix.`
if (!canPerformAutonomousAction(instanceId, "error_recovery")) {
log.warn("[SOLO] Error recovery blocked by loop prevention", { instanceId, sessionId, message })
return
}
log.info(`[SOLO] Session error in autonomous mode, prompting fix: ${message}`)
incrementStep(instanceId)
sendMessage(instanceId, sessionId, `I encountered an error: "${message}". Please analyze the cause and provide a fix.`, [], solo.activeTaskId || undefined).catch((err) => {
recordAutonomousAction(instanceId, "error_recovery", message)
sendMessage(instanceId, sessionId, errorMessage, [], solo.activeTaskId || undefined).catch((err) => {
log.error("[SOLO] Failed to send error recovery message", err)
resetErrorRecovery(instanceId)
})
return
}

View File

@@ -154,8 +154,21 @@ function withSession(instanceId: string, sessionId: string, updater: (session: S
return next
})
// Persist session tasks to storage
persistSessionTasks(instanceId)
// Persist session tasks to storage (DEBOUNCED)
schedulePersist(instanceId)
}
// Debounce map for persistence
const persistTimers = new Map<string, ReturnType<typeof setTimeout>>()
function schedulePersist(instanceId: string) {
const existing = persistTimers.get(instanceId)
if (existing) clearTimeout(existing)
const timer = setTimeout(() => {
persistTimers.delete(instanceId)
persistSessionTasks(instanceId)
}, 2000)
persistTimers.set(instanceId, timer)
}
async function persistSessionTasks(instanceId: string) {
@@ -312,7 +325,7 @@ async function isBlankSession(session: Session, instanceId: string, fetchIfNeede
}
// For a more thorough deep clean, we need to look at actual messages
const instance = instances().get(instanceId)
if (!instance?.client) {
return isFreshSession
@@ -335,23 +348,23 @@ async function isBlankSession(session: Session, instanceId: string, fetchIfNeede
// Subagent: "blank" (really: finished doing its job) if actually blank...
// ... OR no streaming, no pending perms, no tool parts
if (messages.length === 0) return true
const hasStreaming = messages.some((msg) => {
const info = msg.info.status || msg.status
return info === "streaming" || info === "sending"
})
const lastMessage = messages[messages.length - 1]
const lastParts = lastMessage?.parts || []
const hasToolPart = lastParts.some((part: any) =>
const hasToolPart = lastParts.some((part: any) =>
part.type === "tool" || part.data?.type === "tool"
)
return !hasStreaming && !session.pendingPermission && !hasToolPart
} else {
// Fork: blank if somehow has no messages or at revert point
if (messages.length === 0) return true
const lastMessage = messages[messages.length - 1]
const lastInfo = lastMessage?.info || lastMessage
return lastInfo?.id === session.revert?.messageID
@@ -429,7 +442,7 @@ export {
setSessionCompactionState,
setSessionPendingPermission,
setActiveSession,
setActiveParentSession,
clearActiveParentSession,

View File

@@ -11,6 +11,11 @@ export interface SoloState {
currentStep: number
activeTaskId: string | null
taskQueue: string[]
// Loop prevention fields
lastActionTimestamp: number
consecutiveErrorCount: number
lastErrorHash: string
isContinuationFromIdle: boolean
}
const [soloStates, setSoloStates] = createSignal<Map<string, SoloState>>(new Map())
@@ -26,6 +31,10 @@ export function getSoloState(instanceId: string): SoloState {
currentStep: 0,
activeTaskId: null,
taskQueue: [],
lastActionTimestamp: 0,
consecutiveErrorCount: 0,
lastErrorHash: "",
isContinuationFromIdle: false,
}
}
return state
@@ -83,3 +92,75 @@ export function popFromTaskQueue(instanceId: string): string | null {
setSoloState(instanceId, { taskQueue: rest })
return next
}
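// Normalize an error message (lowercase, digits masked) so repeats of the same
// failure produce the same hash for loop detection.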
function computeErrorHash(error: string): string {
const normalized = error.toLowerCase().replace(/\d+/g, "X").replace(/\s+/g, " ")
return normalized.slice(0, 100)
}
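// Guard rails for autonomous (SOLO) follow-ups: a cooldown between actions and
// a cap on consecutive identical errors before recovery is stopped.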
const COOLDOWN_MS = 3000
const MAX_CONSECUTIVE_ERRORS = 3
export function canPerformAutonomousAction(instanceId: string, actionType: "error_recovery" | "idle_continuation"): boolean {
const state = getSoloState(instanceId)
const now = Date.now()
if (actionType === "error_recovery") {
if (state.consecutiveErrorCount >= MAX_CONSECUTIVE_ERRORS) {
log.warn("Maximum consecutive errors reached, stopping autonomous error recovery", { instanceId, count: state.consecutiveErrorCount })
return false
}
}
if (actionType === "idle_continuation" && state.isContinuationFromIdle) {
log.warn("Already continuing from idle, preventing double continuation", { instanceId })
return false
}
const timeSinceLastAction = now - state.lastActionTimestamp
if (timeSinceLastAction < COOLDOWN_MS && state.lastActionTimestamp > 0) {
log.warn("Cooldown period active, delaying autonomous action", { instanceId, timeSinceLastAction })
return false
}
return true
}
export function recordAutonomousAction(instanceId: string, actionType: "error_recovery" | "idle_continuation", errorMessage?: string): void {
const state = getSoloState(instanceId)
const now = Date.now()
if (actionType === "error_recovery" && errorMessage) {
const errorHash = computeErrorHash(errorMessage)
const newErrorCount = errorHash === state.lastErrorHash ? state.consecutiveErrorCount + 1 : 1
setSoloState(instanceId, {
lastActionTimestamp: now,
consecutiveErrorCount: newErrorCount,
lastErrorHash: errorHash,
})
} else if (actionType === "idle_continuation") {
setSoloState(instanceId, {
lastActionTimestamp: now,
isContinuationFromIdle: true,
})
} else {
setSoloState(instanceId, {
lastActionTimestamp: now,
})
}
}
export function clearContinuationFlag(instanceId: string): void {
const state = getSoloState(instanceId)
if (state.isContinuationFromIdle) {
setSoloState(instanceId, { isContinuationFromIdle: false })
}
}
export function resetErrorRecovery(instanceId: string): void {
setSoloState(instanceId, {
consecutiveErrorCount: 0,
lastErrorHash: "",
})
}

View File

@@ -16,14 +16,14 @@ export async function addTask(
title: string
): Promise<{ id: string; taskSessionId?: string }> {
const id = nanoid()
console.log("[task-actions] addTask started", { instanceId, sessionId, title, taskId: id });
// console.log("[task-actions] addTask started", { instanceId, sessionId, title, taskId: id });
let taskSessionId: string | undefined
const parentSession = sessions().get(instanceId)?.get(sessionId)
const parentAgent = parentSession?.agent || ""
const parentModel = parentSession?.model
try {
console.log("[task-actions] creating new task session...");
// console.log("[task-actions] creating new task session...");
const created = await createSession(instanceId, parentAgent || undefined, { skipAutoCleanup: true })
taskSessionId = created.id
withSession(instanceId, taskSessionId, (taskSession) => {
@@ -35,7 +35,7 @@ export async function addTask(
taskSession.model = { ...parentModel }
}
})
console.log("[task-actions] task session created", { taskSessionId });
// console.log("[task-actions] task session created", { taskSessionId });
} catch (error) {
console.error("[task-actions] Failed to create session for task", error)
showToastNotification({
@@ -62,7 +62,7 @@ export async function addTask(
session.tasks = []
}
session.tasks = [newTask, ...session.tasks]
console.log("[task-actions] task added to session", { taskCount: session.tasks.length });
// console.log("[task-actions] task added to session", { taskCount: session.tasks.length });
})
return { id, taskSessionId }
@@ -74,7 +74,7 @@ export function addTaskMessage(
taskId: string,
messageId: string,
): void {
console.log("[task-actions] addTaskMessage called", { instanceId, sessionId, taskId, messageId });
// console.log("[task-actions] addTaskMessage called", { instanceId, sessionId, taskId, messageId });
withSession(instanceId, sessionId, (session) => {
let targetSessionId = sessionId
let targetTaskId = taskId
@@ -82,7 +82,7 @@ export function addTaskMessage(
// If this is a child session, the tasks are on the parent
if (session.parentId && !session.tasks) {
targetSessionId = session.parentId
console.log("[task-actions] task session detected, targeting parent", { parentId: session.parentId });
// console.log("[task-actions] task session detected, targeting parent", { parentId: session.parentId });
}
withSession(instanceId, targetSessionId, (targetSession) => {
@@ -105,9 +105,9 @@ export function addTaskMessage(
updatedTasks[taskIndex] = updatedTask
targetSession.tasks = updatedTasks
console.log("[task-actions] message ID added to task with reactivity", { taskId: task.id, messageCount: messageIds.length });
// console.log("[task-actions] message ID added to task with reactivity", { taskId: task.id, messageCount: messageIds.length });
} else {
console.log("[task-actions] message ID already in task", { taskId: task.id });
// console.log("[task-actions] message ID already in task", { taskId: task.id });
}
} else {
console.warn("[task-actions] task not found in session", { targetTaskId, sessionId, availableTaskCount: targetSession.tasks.length });