feat(ui): Enhanced agent thinking detection for Ollama models

- Added semantic detection module (agent-status-detection.ts) that analyzes
  message content for keywords like 'standby', 'processing', 'analyzing'
- Updated isAgentThinking() in multi-task-chat to use semantic detection
  when streaming has technically ended but agent is conceptually working
- Added dynamic status messages (PROCESSING, AGENT PROCESSING, etc.)
  instead of static THINKING/STREAMING labels
- Enhanced session-status.ts to check semantic content before returning idle
- Fixes an issue where Ollama models emit a status message and then pause,
  causing the UI to incorrectly show the ready-to-send state
This commit is contained in:
Gemini AI
2025-12-30 02:53:21 +04:00
Unverified
parent eb863bdde7
commit 942582e981
3 changed files with 292 additions and 3 deletions

View File

@@ -10,6 +10,7 @@ import { addToTaskQueue, getSoloState, setActiveTaskId, toggleAutonomous, toggle
import { getLogger } from "@/lib/logger";
import { clearCompactionSuggestion, getCompactionSuggestion } from "@/stores/session-compaction";
import { emitSessionSidebarRequest } from "@/lib/session-sidebar-events";
import { detectAgentWorkingState, getAgentStatusMessage } from "@/lib/agent-status-detection";
import {
Command,
Plus,
@@ -216,7 +217,36 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
const store = messageStore();
const lastMsg = store.getMessage(ids[ids.length - 1]);
return lastMsg?.role === "assistant" && (lastMsg.status === "streaming" || lastMsg.status === "sending");
// Basic check: streaming or sending status
if (lastMsg?.role === "assistant" && (lastMsg.status === "streaming" || lastMsg.status === "sending")) {
return true;
}
// Enhanced check: semantic detection for "standby", "processing" messages
// This catches Ollama models that output status messages and pause
if (lastMsg?.role === "assistant") {
const workingState = detectAgentWorkingState(lastMsg);
return workingState.isWorking;
}
return false;
});
// Get dynamic status message for display
// Dynamic label shown beside the thinking indicator. Derives the text from
// the semantic status of the last assistant message; any case where no
// assistant message (or no status text) is available falls back to the
// static "THINKING" placeholder.
const agentStatusMessage = createMemo(() => {
  const FALLBACK = "THINKING";
  const ids = filteredMessageIds();
  const last = ids.length > 0 ? messageStore().getMessage(ids[ids.length - 1]) : undefined;
  if (last?.role !== "assistant") return FALLBACK;
  // `||` (not `??`) is deliberate: an empty status string also falls back.
  return getAgentStatusMessage(last)?.toUpperCase() || FALLBACK;
});
// Auto-scroll during streaming - DISABLED for performance testing
@@ -539,7 +569,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
<Show when={isAgentThinking()}>
<div class="flex items-center space-x-2 px-3 py-1.5 bg-violet-500/15 border border-violet-500/30 rounded-lg animate-pulse shadow-[0_0_20px_rgba(139,92,246,0.2)]">
<Sparkles size={12} class="text-violet-400 animate-spin" style={{ "animation-duration": "3s" }} />
<span class="text-[10px] font-black text-violet-400 uppercase tracking-tight">Streaming</span>
<span class="text-[10px] font-black text-violet-400 uppercase tracking-tight">{agentStatusMessage()}</span>
<span class="text-[10px] font-bold text-violet-300">{formatTokenTotal(tokenStats().used)}</span>
</div>
</Show>
@@ -846,7 +876,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
<div class="w-1 h-1 bg-indigo-400 rounded-full animate-bounce" style={{ "animation-delay": "150ms" }} />
<div class="w-1 h-1 bg-indigo-400 rounded-full animate-bounce" style={{ "animation-delay": "300ms" }} />
</div>
<span class="text-[9px] font-bold text-indigo-400">{isAgentThinking() ? "THINKING" : "SENDING"}</span>
<span class="text-[9px] font-bold text-indigo-400">{isSending() ? "SENDING" : agentStatusMessage()}</span>
</div>
</Show>
</div>