restore: bring back all custom UI enhancements from checkpoint
Restored from commit 52be710 (checkpoint before qwen oauth + todo roller): Enhanced UI Features: - SMART FIX button with AI code analysis - APEX (Autonomous Programming EXecution) mode - SHIELD (Auto-approval) mode - MULTIX MODE multi-task pipeline interface - Live streaming token counter - Thinking indicator with bouncing dots animation Components restored: - packages/ui/src/components/chat/multi-task-chat.tsx - packages/ui/src/components/instance/instance-shell2.tsx - packages/ui/src/components/settings/OllamaCloudSettings.tsx - packages/ui/src/components/settings/QwenCodeSettings.tsx - packages/ui/src/stores/solo-store.ts - packages/ui/src/stores/task-actions.ts - packages/ui/src/stores/session-events.ts (autonomous mode) - packages/server/src/integrations/ollama-cloud.ts - packages/server/src/server/routes/ollama.ts - packages/server/src/server/routes/qwen.ts This ensures all custom features are preserved in source control.
This commit is contained in:
273
packages/server/src/integrations/ollama-cloud.ts
Normal file
273
packages/server/src/integrations/ollama-cloud.ts
Normal file
@@ -0,0 +1,273 @@
|
||||
/**
 * Ollama Cloud API Integration
 * Provides access to Ollama's cloud models through API
 */

import { z } from "zod"

// Configuration schema for Ollama Cloud.
// apiKey is optional here; OllamaCloudClient.chat() rejects requests when it is absent.
export const OllamaCloudConfigSchema = z.object({
  // Bearer token sent in the Authorization header when present.
  apiKey: z.string().optional(),
  // Base URL of the API; OllamaCloudClient strips a trailing slash.
  endpoint: z.string().default("https://ollama.com"),
  // Feature flag for the integration as a whole.
  enabled: z.boolean().default(false)
})

export type OllamaCloudConfig = z.infer<typeof OllamaCloudConfigSchema>
// Model information schema — shape of one entry in the /api/tags "models" array.
// NOTE(review): every field is declared as a string. Ollama's native /api/tags
// returns `size` as a number (bytes) — confirm the cloud API serializes it as a
// string, otherwise listModels() will throw on parse.
export const OllamaModelSchema = z.object({
  name: z.string(),
  size: z.string(),
  digest: z.string(),
  modified_at: z.string(),
  created_at: z.string()
})

export type OllamaModel = z.infer<typeof OllamaModelSchema>
// Chat message schema — a single turn in a conversation.
export const ChatMessageSchema = z.object({
  // Who produced the message.
  role: z.enum(["user", "assistant", "system"]),
  content: z.string(),
  // Optional image attachments (presumably base64-encoded — TODO confirm against API docs).
  images: z.array(z.string()).optional()
})

export type ChatMessage = z.infer<typeof ChatMessageSchema>
// Chat request/response schemas
export const ChatRequestSchema = z.object({
  model: z.string(),
  messages: z.array(ChatMessageSchema),
  // When true, OllamaCloudClient.chat() yields incremental responses.
  stream: z.boolean().default(false),
  // Optional sampling parameters.
  options: z.object({
    // Sampling temperature, validated to the 0..2 range.
    temperature: z.number().min(0).max(2).optional(),
    // Nucleus-sampling threshold, validated to the 0..1 range.
    top_p: z.number().min(0).max(1).optional()
  }).optional()
})

// One response object; in streaming mode many of these arrive, the last with done=true.
export const ChatResponseSchema = z.object({
  model: z.string(),
  created_at: z.string(),
  message: ChatMessageSchema,
  // True on the final chunk of a streaming response.
  done: z.boolean().optional(),
  // Timing/accounting fields (presumably nanoseconds/token counts — verify
  // against the Ollama API docs); typically only present on the final chunk.
  total_duration: z.number().optional(),
  load_duration: z.number().optional(),
  prompt_eval_count: z.number().optional(),
  prompt_eval_duration: z.number().optional(),
  eval_count: z.number().optional(),
  eval_duration: z.number().optional()
})

export type ChatRequest = z.infer<typeof ChatRequestSchema>
export type ChatResponse = z.infer<typeof ChatResponseSchema>
export class OllamaCloudClient {
|
||||
private config: OllamaCloudConfig
|
||||
private baseUrl: string
|
||||
|
||||
constructor(config: OllamaCloudConfig) {
|
||||
this.config = config
|
||||
this.baseUrl = config.endpoint.replace(/\/$/, "") // Remove trailing slash
|
||||
}
|
||||
|
||||
/**
|
||||
* Test connection to Ollama Cloud API
|
||||
*/
|
||||
async testConnection(): Promise<boolean> {
|
||||
try {
|
||||
const response = await this.makeRequest("/api/tags", {
|
||||
method: "GET"
|
||||
})
|
||||
return response.ok
|
||||
} catch (error) {
|
||||
console.error("Ollama Cloud connection test failed:", error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List available models
|
||||
*/
|
||||
async listModels(): Promise<OllamaModel[]> {
|
||||
try {
|
||||
const response = await this.makeRequest("/api/tags", {
|
||||
method: "GET"
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch models: ${response.statusText}`)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
return z.array(OllamaModelSchema).parse(data.models || [])
|
||||
} catch (error) {
|
||||
console.error("Failed to list Ollama Cloud models:", error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate chat completion
|
||||
*/
|
||||
async chat(request: ChatRequest): Promise<AsyncIterable<ChatResponse>> {
|
||||
if (!this.config.apiKey) {
|
||||
throw new Error("Ollama Cloud API key is required")
|
||||
}
|
||||
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
// Add authorization header if API key is provided
|
||||
if (this.config.apiKey) {
|
||||
headers["Authorization"] = `Bearer ${this.config.apiKey}`
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`${this.baseUrl}/api/chat`, {
|
||||
method: "POST",
|
||||
headers,
|
||||
body: JSON.stringify(request)
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Chat request failed: ${response.statusText}`)
|
||||
}
|
||||
|
||||
if (request.stream) {
|
||||
return this.parseStreamingResponse(response)
|
||||
} else {
|
||||
const data = ChatResponseSchema.parse(await response.json())
|
||||
return this.createAsyncIterable([data])
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Ollama Cloud chat request failed:", error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull a model (for cloud models, this just makes them available)
|
||||
*/
|
||||
async pullModel(modelName: string): Promise<void> {
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
if (this.config.apiKey) {
|
||||
headers["Authorization"] = `Bearer ${this.config.apiKey}`
|
||||
}
|
||||
|
||||
const response = await fetch(`${this.baseUrl}/api/pull`, {
|
||||
method: "POST",
|
||||
headers,
|
||||
body: JSON.stringify({ name: modelName })
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to pull model ${modelName}: ${response.statusText}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse streaming response
|
||||
*/
|
||||
private async *parseStreamingResponse(response: Response): AsyncIterable<ChatResponse> {
|
||||
if (!response.body) {
|
||||
throw new Error("Response body is missing")
|
||||
}
|
||||
|
||||
const reader = response.body.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
const lines = decoder.decode(value, { stream: true }).split('\n').filter(line => line.trim())
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const data = JSON.parse(line)
|
||||
const chatResponse = ChatResponseSchema.parse(data)
|
||||
yield chatResponse
|
||||
|
||||
if (chatResponse.done) {
|
||||
return
|
||||
}
|
||||
} catch (parseError) {
|
||||
// Skip invalid JSON lines
|
||||
console.warn("Failed to parse streaming line:", line, parseError)
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
reader.releaseLock()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create async iterable from array
|
||||
*/
|
||||
private async *createAsyncIterable<T>(items: T[]): AsyncIterable<T> {
|
||||
for (const item of items) {
|
||||
yield item
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Make authenticated request to API
|
||||
*/
|
||||
private async makeRequest(endpoint: string, options: RequestInit): Promise<Response> {
|
||||
const url = `${this.baseUrl}${endpoint}`
|
||||
|
||||
const headers: Record<string, string> = {
|
||||
...options.headers as Record<string, string>
|
||||
}
|
||||
|
||||
// Add authorization header if API key is provided
|
||||
if (this.config.apiKey) {
|
||||
headers["Authorization"] = `Bearer ${this.config.apiKey}`
|
||||
}
|
||||
|
||||
return fetch(url, {
|
||||
...options,
|
||||
headers
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cloud-specific models (models ending with -cloud)
|
||||
*/
|
||||
async getCloudModels(): Promise<OllamaModel[]> {
|
||||
const allModels = await this.listModels()
|
||||
return allModels.filter(model => model.name.endsWith("-cloud"))
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate API key format
|
||||
*/
|
||||
static validateApiKey(apiKey: string): boolean {
|
||||
return typeof apiKey === "string" && apiKey.length > 0
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available cloud model names
|
||||
*/
|
||||
async getCloudModelNames(): Promise<string[]> {
|
||||
const cloudModels = await this.getCloudModels()
|
||||
return cloudModels.map(model => model.name)
|
||||
}
|
||||
}
|
||||
|
||||
// Default cloud models based on Ollama documentation.
// `as const` keeps the entries as string literals so CloudModelName below is a
// literal union rather than plain string.
export const DEFAULT_CLOUD_MODELS = [
  "gpt-oss:120b-cloud",
  "llama3.1:70b-cloud",
  "llama3.1:8b-cloud",
  "qwen2.5:32b-cloud",
  "qwen2.5:7b-cloud"
] as const

// Union of the names above, e.g. "gpt-oss:120b-cloud".
export type CloudModelName = typeof DEFAULT_CLOUD_MODELS[number]
Reference in New Issue
Block a user