Integrate Context-Engine RAG service for enhanced LLM responses

Backend:
- Created context-engine/client.ts - HTTP client for Context-Engine API (see the sketch below)
- Created context-engine/service.ts - Lifecycle management of Context-Engine sidecar
- Created context-engine/index.ts - Module exports
- Created server/routes/context-engine.ts - API endpoints for status/health/query
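
A minimal sketch of what the HTTP client in context-engine/client.ts might look like. The base URL, endpoint paths, and response shapes below are illustrative assumptions, not the actual implementation; the point is that every call degrades to a harmless default instead of throwing.

// Hypothetical shape of context-engine/client.ts; port and endpoints are assumed.
export type ContextEngineStatus = "stopped" | "ready" | "indexing" | "error"

export class ContextEngineClient {
  constructor(private baseUrl: string = "http://127.0.0.1:8765") {}

  // Reports "stopped" instead of throwing when the sidecar is unreachable.
  async status(): Promise<ContextEngineStatus> {
    try {
      const res = await fetch(`${this.baseUrl}/status`)
      if (!res.ok) return "stopped"
      const body = (await res.json()) as { status?: ContextEngineStatus }
      return body.status ?? "stopped"
    } catch {
      return "stopped"
    }
  }

  // Asks the engine for chunks relevant to a prompt; empty array on any failure.
  async query(workspaceId: string, prompt: string): Promise<string[]> {
    try {
      const res = await fetch(`${this.baseUrl}/query`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ workspaceId, prompt }),
      })
      if (!res.ok) return []
      const body = (await res.json()) as { chunks?: string[] }
      return body.chunks ?? []
    } catch {
      return []
    }
  }
}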

Integration:
- workspaces/manager.ts: Trigger indexing when workspace becomes ready (non-blocking)
- index.ts: Initialize ContextEngineService on server start (lazy mode)
- ollama-cloud.ts: Inject RAG context into chat requests when available
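
A rough sketch of how the RAG injection in ollama-cloud.ts could work. The helper name, message shape, and separator are assumptions for illustration; what matters is that any failure returns the original messages unchanged.

// Illustrative only: prepends retrieved context as a system message when available.
interface ChatMessage {
  role: "system" | "user" | "assistant"
  content: string
}

// `client` is assumed to be the ContextEngineClient sketched above.
async function withRagContext(
  client: { query(workspaceId: string, prompt: string): Promise<string[]> },
  workspaceId: string,
  messages: ChatMessage[],
): Promise<ChatMessage[]> {
  try {
    const lastUser = [...messages].reverse().find((m) => m.role === "user")
    if (!lastUser) return messages
    const chunks = await client.query(workspaceId, lastUser.content)
    if (chunks.length === 0) return messages
    const context: ChatMessage = {
      role: "system",
      content: `Relevant workspace context:\n${chunks.join("\n---\n")}`,
    }
    return [context, ...messages]
  } catch {
    // Graceful fallback: the chat request proceeds without RAG context.
    return messages
  }
}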

Frontend:
- model-selector.tsx: Added Context-Engine status indicator
  - Green dot = Ready (RAG enabled)
  - Blue pulsing dot = Indexing
  - Red dot = Error
  - Hidden when Context-Engine not running

All operations are non-blocking with graceful fallback when Context-Engine is unavailable.
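
As an example of the non-blocking pattern, the indexing trigger in workspaces/manager.ts could be a fire-and-forget call along these lines (function and parameter names are assumptions):

// Sketch: start indexing without awaiting it, so workspace startup is never blocked.
function onWorkspaceReady(
  workspacePath: string,
  engine: { index(path: string): Promise<void> },
): void {
  void engine.index(workspacePath).catch((err) => {
    // An indexing failure only disables RAG; it must not affect the workspace itself.
    console.warn("Context-Engine indexing failed:", err)
  })
}
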
Author: Gemini AI
Date: 2025-12-24 22:20:13 +04:00
Signature: unverified
Parent: e17e7cd32e
Commit: 743d0367e2
9 changed files with 805 additions and 15 deletions


@@ -1,7 +1,7 @@
 import { Combobox } from "@kobalte/core/combobox"
-import { createEffect, createMemo, createSignal, onCleanup, onMount } from "solid-js"
+import { createEffect, createMemo, createSignal, onCleanup, onMount, Show } from "solid-js"
 import { providers, fetchProviders } from "../stores/sessions"
-import { ChevronDown } from "lucide-solid"
+import { ChevronDown, Database } from "lucide-solid"
 import type { Model } from "../types/session"
 import { getLogger } from "../lib/logger"
 import { getUserScopedKey } from "../lib/user-storage"
@@ -29,6 +29,11 @@ export default function ModelSelector(props: ModelSelectorProps) {
   const [isOpen, setIsOpen] = createSignal(false)
   const qwenAuth = useQwenOAuth()
   const [offlineModels, setOfflineModels] = createSignal<Set<string>>(new Set())
+  // Context-Engine status: "stopped" | "ready" | "indexing" | "error"
+  type ContextEngineStatus = "stopped" | "ready" | "indexing" | "error"
+  const [contextEngineStatus, setContextEngineStatus] = createSignal<ContextEngineStatus>("stopped")
   let triggerRef!: HTMLButtonElement
   let searchInputRef!: HTMLInputElement
@@ -64,9 +69,28 @@
     }
     window.addEventListener("opencode-zen-offline-models", handleCustom as EventListener)
     window.addEventListener("storage", handleStorage)
+    // Poll Context-Engine status
+    const pollContextEngine = async () => {
+      try {
+        const response = await fetch("/api/context-engine/status")
+        if (response.ok) {
+          const data = await response.json() as { status: ContextEngineStatus }
+          setContextEngineStatus(data.status ?? "stopped")
+        } else {
+          setContextEngineStatus("stopped")
+        }
+      } catch {
+        setContextEngineStatus("stopped")
+      }
+    }
+    pollContextEngine()
+    const pollInterval = setInterval(pollContextEngine, 5000)
     onCleanup(() => {
       window.removeEventListener("opencode-zen-offline-models", handleCustom as EventListener)
       window.removeEventListener("storage", handleStorage)
+      clearInterval(pollInterval)
     })
   })
@@ -170,6 +194,29 @@
 {currentModelValue() && isOfflineModel(currentModelValue() as FlatModel) && (
   <span class="selector-badge selector-badge-warning">Offline</span>
 )}
+{/* Context-Engine RAG Status Indicator */}
+<Show when={contextEngineStatus() !== "stopped"}>
+  <span
+    class="inline-flex items-center gap-1 text-[10px]"
+    title={
+      contextEngineStatus() === "ready"
+        ? "Context Engine is active - RAG enabled"
+        : contextEngineStatus() === "indexing"
+          ? "Context Engine is indexing files..."
+          : "Context Engine error"
+    }
+  >
+    <span
+      class={`w-2 h-2 rounded-full ${contextEngineStatus() === "ready"
+        ? "bg-emerald-500"
+        : contextEngineStatus() === "indexing"
+          ? "bg-blue-500 animate-pulse"
+          : "bg-red-500"
+        }`}
+    />
+    <Database class="w-3 h-3 text-zinc-400" />
+  </span>
+</Show>
 </span>
 {currentModelValue() && (
   <span class="selector-trigger-secondary">