diff --git a/packages/server/src/integrations/opencode-zen.ts b/packages/server/src/integrations/opencode-zen.ts
new file mode 100644
index 0000000..0ba9ad5
--- /dev/null
+++ b/packages/server/src/integrations/opencode-zen.ts
@@ -0,0 +1,312 @@
+/**
+ * OpenCode Zen API Integration
+ * Provides direct access to OpenCode's free "Zen" models without requiring opencode.exe
+ * Based on reverse-engineering the OpenCode source at https://github.com/sst/opencode
+ *
+ * Free models (cost.input === 0) can be accessed with apiKey: "public"
+ */
+
+import { z } from "zod"
+
+// Configuration schema for OpenCode Zen
+export const OpenCodeZenConfigSchema = z.object({
+  enabled: z.boolean().default(true), // Free models enabled by default
+  endpoint: z.string().default("https://api.opencode.ai/v1"),
+  apiKey: z.string().default("public") // "public" key for free models
+})
+
+export type OpenCodeZenConfig = z.infer<typeof OpenCodeZenConfigSchema>
+
+// Model schema matching models.dev format
+export const ZenModelSchema = z.object({
+  id: z.string(),
+  name: z.string(),
+  family: z.string().optional(),
+  reasoning: z.boolean().optional(),
+  tool_call: z.boolean().optional(),
+  attachment: z.boolean().optional(),
+  temperature: z.boolean().optional(),
+  cost: z.object({
+    input: z.number(),
+    output: z.number(),
+    cache_read: z.number().optional(),
+    cache_write: z.number().optional()
+  }).optional(),
+  limit: z.object({
+    context: z.number(),
+    output: z.number()
+  }).optional()
+})
+
+export type ZenModel = z.infer<typeof ZenModelSchema>
+
+// Chat message schema (OpenAI-compatible)
+export const ChatMessageSchema = z.object({
+  role: z.enum(["user", "assistant", "system"]),
+  content: z.string()
+})
+
+export type ChatMessage = z.infer<typeof ChatMessageSchema>
+
+// Chat request schema
+export const ChatRequestSchema = z.object({
+  model: z.string(),
+  messages: z.array(ChatMessageSchema),
+  stream: z.boolean().default(true),
+  temperature: z.number().optional(),
+  max_tokens: z.number().optional()
+})
+
+export type ChatRequest = z.infer<typeof ChatRequestSchema>
+
+// Chat response chunk schema
+export const ChatChunkSchema = z.object({
+  id: z.string().optional(),
+  object: z.string().optional(),
+  created: z.number().optional(),
+  model: z.string().optional(),
+  choices: z.array(z.object({
+    index: z.number(),
+    delta: z.object({
+      role: z.string().optional(),
+      content: z.string().optional()
+    }).optional(),
+    message: z.object({
+      role: z.string(),
+      content: z.string()
+    }).optional(),
+    finish_reason: z.string().nullable().optional()
+  }))
+})
+
+export type ChatChunk = z.infer<typeof ChatChunkSchema>
+
+// Known free OpenCode Zen models (cost.input === 0)
+// From models.dev API - these are the free tier models
+export const FREE_ZEN_MODELS: ZenModel[] = [
+  {
+    id: "gpt-5-nano",
+    name: "GPT-5 Nano",
+    family: "gpt-5-nano",
+    reasoning: true,
+    tool_call: true,
+    attachment: true,
+    temperature: false,
+    cost: { input: 0, output: 0 },
+    limit: { context: 400000, output: 128000 }
+  },
+  {
+    id: "big-pickle",
+    name: "Big Pickle",
+    family: "pickle",
+    reasoning: false,
+    tool_call: true,
+    attachment: false,
+    temperature: true,
+    cost: { input: 0, output: 0 },
+    limit: { context: 128000, output: 16384 }
+  },
+  {
+    id: "grok-code-fast-1",
+    name: "Grok Code Fast 1",
+    family: "grok",
+    reasoning: true,
+    tool_call: true,
+    attachment: false,
+    temperature: true,
+    cost: { input: 0, output: 0 },
+    limit: { context: 256000, output: 10000 }
+  },
+  {
+    id: "minimax-m2.1",
+    name: "MiniMax M2.1",
+    family: "minimax",
+    reasoning: true,
+    tool_call: true,
+    attachment: false,
+    temperature: true,
+    cost: { input: 0, output: 0 },
+    limit: { context: 205000, output: 131072 }
+  }
+]
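+
+// Shape note (inferred from the parsing in getModels() below, not from
+// models.dev documentation): the payload is keyed by provider, then model id,
+// e.g.
+//
+//   {
+//     "opencode": {
+//       "models": {
+//         "gpt-5-nano": { "name": "GPT-5 Nano", "cost": { "input": 0, "output": 0 }, ... }
+//       }
+//     }
+//   }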
+
+export class OpenCodeZenClient {
+  private config: OpenCodeZenConfig
+  private baseUrl: string
+  private modelsCache: ZenModel[] | null = null
+  private modelsCacheTime: number = 0
+  private readonly CACHE_TTL_MS = 5 * 60 * 1000 // 5 minutes
+
+  constructor(config?: Partial<OpenCodeZenConfig>) {
+    this.config = OpenCodeZenConfigSchema.parse(config || {})
+    this.baseUrl = this.config.endpoint.replace(/\/$/, "")
+  }
+
+  /**
+   * Get free Zen models from OpenCode
+   */
+  async getModels(): Promise<ZenModel[]> {
+    // Return cached models if still valid
+    const now = Date.now()
+    if (this.modelsCache && (now - this.modelsCacheTime) < this.CACHE_TTL_MS) {
+      return this.modelsCache
+    }
+
+    try {
+      // Try to fetch fresh models from models.dev
+      const response = await fetch("https://models.dev/api.json", {
+        headers: {
+          "User-Agent": "NomadArch/1.0"
+        },
+        signal: AbortSignal.timeout(10000)
+      })
+
+      if (response.ok) {
+        const data = await response.json()
+        // Extract OpenCode provider and filter free models
+        const opencodeProvider = data["opencode"]
+        if (opencodeProvider && opencodeProvider.models) {
+          const freeModels: ZenModel[] = []
+          for (const [id, model] of Object.entries(opencodeProvider.models)) {
+            const m = model as any
+            if (m.cost && m.cost.input === 0) {
+              freeModels.push({
+                id,
+                name: m.name,
+                family: m.family,
+                reasoning: m.reasoning,
+                tool_call: m.tool_call,
+                attachment: m.attachment,
+                temperature: m.temperature,
+                cost: m.cost,
+                limit: m.limit
+              })
+            }
+          }
+          if (freeModels.length > 0) {
+            this.modelsCache = freeModels
+            this.modelsCacheTime = now
+            return freeModels
+          }
+        }
+      }
+    } catch (error) {
+      console.warn("Failed to fetch models from models.dev, using fallback:", error)
+    }
+
+    // Fallback to hardcoded free models
+    this.modelsCache = FREE_ZEN_MODELS
+    this.modelsCacheTime = now
+    return FREE_ZEN_MODELS
+  }
+
+  /**
+   * Test connection to OpenCode Zen API
+   */
+  async testConnection(): Promise<boolean> {
+    try {
+      const models = await this.getModels()
+      return models.length > 0
+    } catch (error) {
+      console.error("OpenCode Zen connection test failed:", error)
+      return false
+    }
+  }
+
+  /**
+   * Chat completion (streaming)
+   */
+  async *chatStream(request: ChatRequest): AsyncGenerator<ChatChunk> {
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${this.config.apiKey}`,
+        "User-Agent": "NomadArch/1.0"
+      },
+      body: JSON.stringify({
+        ...request,
+        stream: true
+      })
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text()
+      throw new Error(`OpenCode Zen API error (${response.status}): ${errorText}`)
+    }
+
+    if (!response.body) {
+      throw new Error("Response body is missing")
+    }
+
+    const reader = response.body.getReader()
+    const decoder = new TextDecoder()
+    let buffer = ""
+
+    try {
+      while (true) {
+        const { done, value } = await reader.read()
+        if (done) break
+
+        buffer += decoder.decode(value, { stream: true })
+        const lines = buffer.split("\n")
+        buffer = lines.pop() || ""
+
+        for (const line of lines) {
+          const trimmed = line.trim()
+          if (trimmed.startsWith("data: ")) {
+            const data = trimmed.slice(6)
+            if (data === "[DONE]") return
+
+            try {
+              const parsed = JSON.parse(data)
+              yield parsed as ChatChunk
+
+              // Check for finish
+              if (parsed.choices?.[0]?.finish_reason) {
+                return
+              }
+            } catch (e) {
+              // Skip invalid JSON
+            }
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock()
+    }
+  }
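+
+  // Note on chatStream above: it assumes OpenAI-style SSE framing
+  // ("data: {json}\n\n", terminated by "data: [DONE]"). Partial lines are
+  // carried over in `buffer` across reads, so a JSON object split between two
+  // network chunks still parses once its closing newline arrives.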
+
+  /**
+   * Chat completion (non-streaming)
+   */
+  async chat(request: ChatRequest): Promise<ChatChunk> {
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${this.config.apiKey}`,
+        "User-Agent": "NomadArch/1.0"
+      },
+      body: JSON.stringify({
+        ...request,
+        stream: false
+      })
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text()
+      throw new Error(`OpenCode Zen API error (${response.status}): ${errorText}`)
+    }
+
+    return await response.json()
+  }
+}
+
+export function getDefaultZenConfig(): OpenCodeZenConfig {
+  return {
+    enabled: true,
+    endpoint: "https://api.opencode.ai/v1",
+    apiKey: "public"
+  }
+}
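+
+// Illustrative usage (a sketch, not wired into the server; the model id is one
+// of the free-tier entries above):
+//
+//   const zen = new OpenCodeZenClient()
+//   for await (const chunk of zen.chatStream({
+//     model: "gpt-5-nano",
+//     messages: [{ role: "user", content: "Hello" }],
+//     stream: true
+//   })) {
+//     process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
+//   }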
diff --git a/packages/server/src/integrations/zai-api.ts b/packages/server/src/integrations/zai-api.ts
new file mode 100644
index 0000000..037f947
--- /dev/null
+++ b/packages/server/src/integrations/zai-api.ts
@@ -0,0 +1,241 @@
+/**
+ * Z.AI API Integration
+ * Provides access to Z.AI's GLM Coding Plan API (Anthropic-compatible)
+ * Based on https://docs.z.ai/devpack/tool/claude#step-2-config-glm-coding-plan
+ */
+
+import { z } from "zod"
+
+// Configuration schema for Z.AI
+export const ZAIConfigSchema = z.object({
+  apiKey: z.string().optional(),
+  endpoint: z.string().default("https://api.z.ai/api/anthropic"),
+  enabled: z.boolean().default(false),
+  timeout: z.number().default(3000000) // 50 minutes as per docs
+})
+
+export type ZAIConfig = z.infer<typeof ZAIConfigSchema>
+
+// Message schema (Anthropic-compatible)
+export const ZAIMessageSchema = z.object({
+  role: z.enum(["user", "assistant"]),
+  content: z.string()
+})
+
+export type ZAIMessage = z.infer<typeof ZAIMessageSchema>
+
+// Chat request schema
+export const ZAIChatRequestSchema = z.object({
+  model: z.string().default("claude-sonnet-4-20250514"),
+  messages: z.array(ZAIMessageSchema),
+  max_tokens: z.number().default(8192),
+  stream: z.boolean().default(true),
+  system: z.string().optional()
+})
+
+export type ZAIChatRequest = z.infer<typeof ZAIChatRequestSchema>
+
+// Chat response schema
+export const ZAIChatResponseSchema = z.object({
+  id: z.string(),
+  type: z.string(),
+  role: z.string(),
+  content: z.array(z.object({
+    type: z.string(),
+    text: z.string().optional()
+  })),
+  model: z.string(),
+  stop_reason: z.string().nullable().optional(),
+  stop_sequence: z.string().nullable().optional(),
+  usage: z.object({
+    input_tokens: z.number(),
+    output_tokens: z.number()
+  }).optional()
+})
+
+export type ZAIChatResponse = z.infer<typeof ZAIChatResponseSchema>
+
+// Stream chunk schema
+export const ZAIStreamChunkSchema = z.object({
+  type: z.string(),
+  index: z.number().optional(),
+  delta: z.object({
+    type: z.string().optional(),
+    text: z.string().optional()
+  }).optional(),
+  message: z.object({
+    id: z.string(),
+    type: z.string(),
+    role: z.string(),
+    content: z.array(z.any()),
+    model: z.string()
+  }).optional(),
+  content_block: z.object({
+    type: z.string(),
+    text: z.string()
+  }).optional()
+})
+
+export type ZAIStreamChunk = z.infer<typeof ZAIStreamChunkSchema>
+
+export class ZAIClient {
+  private config: ZAIConfig
+  private baseUrl: string
+
+  constructor(config: ZAIConfig) {
+    this.config = config
+    this.baseUrl = config.endpoint.replace(/\/$/, "") // Remove trailing slash
+  }
+
+  /**
+   * Test connection to Z.AI API
+   */
+  async testConnection(): Promise<boolean> {
+    if (!this.config.apiKey) {
+      return false
+    }
+
+    try {
+      // Make a minimal request to test auth
+      const response = await fetch(`${this.baseUrl}/v1/messages`, {
+        method: "POST",
+        headers: this.getHeaders(),
+        body: JSON.stringify({
+          model: "claude-sonnet-4-20250514",
+          max_tokens: 1,
+          messages: [{ role: "user", content: "test" }]
+        })
+      })
+
+      // Any response other than auth error means connection works
+      return response.status !== 401 && response.status !== 403
+    } catch (error) {
+      console.error("Z.AI connection test failed:", error)
+      return false
+    }
+  }
+
+  /**
+   * List available models
+   */
+  async listModels(): Promise<string[]> {
+    // Z.AI provides access to Claude models through their proxy
+    return [
+      "claude-sonnet-4-20250514",
+      "claude-3-5-sonnet-20241022",
+      "claude-3-opus-20240229",
+      "claude-3-haiku-20240307"
+    ]
+  }
+
+  /**
+   * Chat completion (streaming)
+   */
+  async *chatStream(request: ZAIChatRequest): AsyncGenerator<ZAIStreamChunk> {
+    if (!this.config.apiKey) {
+      throw new Error("Z.AI API key is required")
+    }
+
+    const response = await fetch(`${this.baseUrl}/v1/messages`, {
+      method: "POST",
+      headers: this.getHeaders(),
+      body: JSON.stringify({
+        ...request,
+        stream: true
+      })
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text()
+      throw new Error(`Z.AI API error (${response.status}): ${errorText}`)
+    }
+
+    if (!response.body) {
+      throw new Error("Response body is missing")
+    }
+
+    const reader = response.body.getReader()
+    const decoder = new TextDecoder()
+    let buffer = ""
+
+    try {
+      while (true) {
+        const { done, value } = await reader.read()
+        if (done) break
+
+        buffer += decoder.decode(value, { stream: true })
+        const lines = buffer.split("\n")
+        buffer = lines.pop() || "" // Keep incomplete line in buffer
+
+        for (const line of lines) {
+          if (line.startsWith("data: ")) {
+            const data = line.slice(6).trim()
+            if (data === "[DONE]") return
+
+            try {
+              const parsed = JSON.parse(data)
+              yield parsed as ZAIStreamChunk
+            } catch (e) {
+              // Skip invalid JSON
+            }
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock()
+    }
+  }
+
+  /**
+   * Chat completion (non-streaming)
+   */
+  async chat(request: ZAIChatRequest): Promise<ZAIChatResponse> {
+    if (!this.config.apiKey) {
+      throw new Error("Z.AI API key is required")
+    }
+
+    const response = await fetch(`${this.baseUrl}/v1/messages`, {
+      method: "POST",
+      headers: this.getHeaders(),
+      body: JSON.stringify({
+        ...request,
+        stream: false
+      })
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text()
+      throw new Error(`Z.AI API error (${response.status}): ${errorText}`)
+    }
+
+    return await response.json()
+  }
+
+  /**
+   * Get request headers
+   */
+  private getHeaders(): Record<string, string> {
+    return {
+      "Content-Type": "application/json",
+      "x-api-key": this.config.apiKey || "",
+      "anthropic-version": "2023-06-01"
+    }
+  }
+
+  /**
+   * Validate API key
+   */
+  static validateApiKey(apiKey: string): boolean {
+    return typeof apiKey === "string" && apiKey.length > 0
+  }
+}
+
+// Default available models
+export const ZAI_MODELS = [
+  "claude-sonnet-4-20250514",
+  "claude-3-5-sonnet-20241022",
+  "claude-3-opus-20240229",
+  "claude-3-haiku-20240307"
+] as const
+
+export type ZAIModelName = typeof ZAI_MODELS[number]
diff --git a/packages/server/src/server/http-server.ts b/packages/server/src/server/http-server.ts
index bf3b932..06ffc27 100644
--- a/packages/server/src/server/http-server.ts
+++ b/packages/server/src/server/http-server.ts
@@ -20,6 +20,8 @@ import { registerEventRoutes } from "./routes/events"
 import { registerStorageRoutes } from "./routes/storage"
 import { registerOllamaRoutes } from "./routes/ollama"
 import { registerQwenRoutes } from "./routes/qwen"
+import { registerZAIRoutes } from "./routes/zai"
+import { registerOpenCodeZenRoutes } from "./routes/opencode-zen"
 import { ServerMeta } from "../api-types"
 import { InstanceStore } from "../storage/instance-store"
@@ -65,7 +67,7 @@ export function createHttpServer(deps: HttpServerDeps) {
   }
 
   app.addHook("onRequest", (request, _reply, done) => {
-    ;(request as FastifyRequest & { __logMeta?: { start: bigint } }).__logMeta = {
+    ; (request as FastifyRequest & { __logMeta?: { start: bigint } }).__logMeta = {
       start: process.hrtime.bigint(),
     }
     done()
@@ -114,6 +116,8 @@ export function createHttpServer(deps: HttpServerDeps) {
   })
   registerOllamaRoutes(app, { logger: deps.logger })
   registerQwenRoutes(app, { logger: deps.logger })
+  registerZAIRoutes(app, { logger: deps.logger })
+  registerOpenCodeZenRoutes(app, { logger: deps.logger })
   registerInstanceProxyRoutes(app, {
     workspaceManager: deps.workspaceManager,
     logger: proxyLogger
   })
diff --git a/packages/server/src/server/routes/opencode-zen.ts b/packages/server/src/server/routes/opencode-zen.ts
new file mode 100644
index 0000000..8199b59
--- /dev/null
+++ b/packages/server/src/server/routes/opencode-zen.ts
@@ -0,0 +1,93 @@
+import { FastifyInstance } from "fastify"
+import { OpenCodeZenClient, type ChatRequest, getDefaultZenConfig } from "../../integrations/opencode-zen"
+import { Logger } from "../../logger"
+
+interface OpenCodeZenRouteDeps {
+  logger: Logger
+}
+
+export async function registerOpenCodeZenRoutes(
+  app: FastifyInstance,
+  deps: OpenCodeZenRouteDeps
+) {
+  const logger = deps.logger.child({ component: "opencode-zen-routes" })
+
+  // Create shared client
+  const client = new OpenCodeZenClient(getDefaultZenConfig())
+
+  // List available free Zen models
+  app.get('/api/opencode-zen/models', async (request, reply) => {
+    try {
+      const models = await client.getModels()
+
+      return {
+        models: models.map(m => ({
+          id: m.id,
+          name: m.name,
+          family: m.family,
+          provider: "opencode-zen",
+          free: true,
+          reasoning: m.reasoning,
+          tool_call: m.tool_call,
+          limit: m.limit
+        }))
+      }
+    } catch (error) {
+      logger.error({ error }, "Failed to list OpenCode Zen models")
+      return reply.status(500).send({ error: "Failed to list models" })
+    }
+  })
+
+  // Test connection
+  app.get('/api/opencode-zen/test', async (request, reply) => {
+    try {
+      const connected = await client.testConnection()
+      return { connected }
+    } catch (error) {
+      logger.error({ error }, "OpenCode Zen connection test failed")
+      return reply.status(500).send({ error: "Connection test failed" })
+    }
+  })
+
+  // Chat completion endpoint
+  app.post('/api/opencode-zen/chat', async (request, reply) => {
+    try {
+      const chatRequest = request.body as ChatRequest
+
+      // Handle streaming
+      if (chatRequest.stream) {
+        reply.raw.writeHead(200, {
+          'Content-Type': 'text/event-stream',
+          'Cache-Control': 'no-cache',
+          'Connection': 'keep-alive',
+        })
+
+        try {
+          for await (const chunk of client.chatStream(chatRequest)) {
+            reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
+
+            // Check for finish
+            if (chunk.choices?.[0]?.finish_reason) {
+              reply.raw.write('data: [DONE]\n\n')
+              break
+            }
+          }
+
+          reply.raw.end()
+        } catch (streamError) {
+          logger.error({ error: streamError }, "OpenCode Zen streaming failed")
+          reply.raw.write(`data: ${JSON.stringify({ error: String(streamError) })}\n\n`)
+          reply.raw.end()
+        }
+      } else {
+        const response = await client.chat(chatRequest)
+        return response
+      }
+    } catch (error) {
+      logger.error({ error }, "OpenCode Zen chat request failed")
+      return reply.status(500).send({ error: "Chat request failed" })
+    }
+  })
+
+  logger.info("OpenCode Zen routes registered - Free models available!")
+}
diff --git a/packages/server/src/server/routes/zai.ts b/packages/server/src/server/routes/zai.ts
new file mode 100644
index 0000000..92e25f3
--- /dev/null
+++ b/packages/server/src/server/routes/zai.ts
@@ -0,0 +1,153 @@
+import { FastifyInstance } from "fastify"
+import { ZAIClient, type ZAIConfig, type ZAIChatRequest } from "../../integrations/zai-api"
+import { Logger } from "../../logger"
+import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs"
+import { join } from "path"
+import { homedir } from "os"
+
+interface ZAIRouteDeps {
+  logger: Logger
+}
+
+// Config file path
+const CONFIG_DIR = join(homedir(), ".nomadarch")
+const CONFIG_FILE = join(CONFIG_DIR, "zai-config.json")
+
+export async function registerZAIRoutes(
+  app: FastifyInstance,
+  deps: ZAIRouteDeps
+) {
+  const logger = deps.logger.child({ component: "zai-routes" })
+
+  // Ensure config directory exists
+  if (!existsSync(CONFIG_DIR)) {
+    mkdirSync(CONFIG_DIR, { recursive: true })
+  }
+
+  // Get Z.AI configuration
+  app.get('/api/zai/config', async (request, reply) => {
+    try {
+      const config = getZAIConfig()
+      return { config: { ...config, apiKey: config.apiKey ? '***' : undefined } }
+    } catch (error) {
+      logger.error({ error }, "Failed to get Z.AI config")
+      return reply.status(500).send({ error: "Failed to get Z.AI configuration" })
+    }
+  })
+
+  // Update Z.AI configuration
+  app.post('/api/zai/config', async (request, reply) => {
+    try {
+      const { enabled, apiKey, endpoint } = request.body as Partial<ZAIConfig>
+      updateZAIConfig({ enabled, apiKey, endpoint })
+      logger.info("Z.AI configuration updated")
+      return { success: true, config: { enabled, endpoint, apiKey: apiKey ? '***' : undefined } }
+    } catch (error) {
+      logger.error({ error }, "Failed to update Z.AI config")
+      return reply.status(500).send({ error: "Failed to update Z.AI configuration" })
+    }
+  })
+
+  // Test Z.AI connection
+  app.post('/api/zai/test', async (request, reply) => {
+    try {
+      const config = getZAIConfig()
+      if (!config.enabled) {
+        return reply.status(400).send({ error: "Z.AI is not enabled" })
+      }
+
+      const client = new ZAIClient(config)
+      const isConnected = await client.testConnection()
+
+      return { connected: isConnected }
+    } catch (error) {
+      logger.error({ error }, "Z.AI connection test failed")
+      return reply.status(500).send({ error: "Connection test failed" })
+    }
+  })
+
+  // List available models
+  app.get('/api/zai/models', async (request, reply) => {
+    try {
+      const config = getZAIConfig()
+      if (!config.enabled) {
+        return reply.status(400).send({ error: "Z.AI is not enabled" })
+      }
+
+      const client = new ZAIClient(config)
+      const models = await client.listModels()
+
+      return { models: models.map(name => ({ name, provider: "zai" })) }
+    } catch (error) {
+      logger.error({ error }, "Failed to list Z.AI models")
+      return reply.status(500).send({ error: "Failed to list models" })
+    }
+  })
+
+  // Chat completion endpoint
+  app.post('/api/zai/chat', async (request, reply) => {
+    try {
+      const config = getZAIConfig()
+      if (!config.enabled) {
+        return reply.status(400).send({ error: "Z.AI is not enabled" })
+      }
+
+      const client = new ZAIClient(config)
+      const chatRequest = request.body as ZAIChatRequest
+
+      // Handle streaming
+      if (chatRequest.stream) {
+        reply.raw.writeHead(200, {
+          'Content-Type': 'text/event-stream',
+          'Cache-Control': 'no-cache',
+          'Connection': 'keep-alive',
+        })
+
+        try {
+          for await (const chunk of client.chatStream(chatRequest)) {
+            reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
+
+            // Check for message_stop event
+            if (chunk.type === "message_stop") {
+              reply.raw.write('data: [DONE]\n\n')
+              break
+            }
+          }
+
+          reply.raw.end()
+        } catch (streamError) {
+          logger.error({ error: streamError }, "Z.AI streaming failed")
+          reply.raw.write(`data: ${JSON.stringify({ error: String(streamError) })}\n\n`)
+          reply.raw.end()
+        }
+      } else {
+        const response = await client.chat(chatRequest)
+        return response
+      }
+    } catch (error) {
+      logger.error({ error }, "Z.AI chat request failed")
+      return reply.status(500).send({ error: "Chat request failed" })
+    }
+  })
+
+  logger.info("Z.AI routes registered")
+}
+
+// Configuration management functions using file-based storage
+function getZAIConfig(): ZAIConfig {
+  try {
+    if (existsSync(CONFIG_FILE)) {
+      const data = readFileSync(CONFIG_FILE, 'utf-8')
+      return JSON.parse(data)
+    }
+    return { enabled: false, endpoint: "https://api.z.ai/api/anthropic", timeout: 3000000 }
+  } catch {
+    return { enabled: false, endpoint: "https://api.z.ai/api/anthropic", timeout: 3000000 }
+  }
+}
+
+function updateZAIConfig(config: Partial<ZAIConfig>): void {
+  const current = getZAIConfig()
+  // Drop undefined fields so a partial update does not clobber stored values
+  const cleaned = Object.fromEntries(
+    Object.entries(config).filter(([, value]) => value !== undefined)
+  )
+  const updated = { ...current, ...cleaned }
+  writeFileSync(CONFIG_FILE, JSON.stringify(updated, null, 2))
+}
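+
+// For reference, the file written by updateZAIConfig() above
+// (~/.nomadarch/zai-config.json) looks like this (key elided):
+//
+//   { "enabled": true, "apiKey": "<your key>", "endpoint": "https://api.z.ai/api/anthropic", "timeout": 3000000 }
+//
+// A non-streaming request against the chat route would then be (sketch only):
+//
+//   await fetch("/api/zai/chat", {
+//     method: "POST",
+//     headers: { "Content-Type": "application/json" },
+//     body: JSON.stringify({
+//       model: "claude-sonnet-4-20250514",
+//       messages: [{ role: "user", content: "Hello" }],
+//       max_tokens: 256,
+//       stream: false
+//     })
+//   })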
diff --git a/packages/ui/src/components/advanced-settings-modal.tsx b/packages/ui/src/components/advanced-settings-modal.tsx
index 8af70fa..ba8c3eb 100644
--- a/packages/ui/src/components/advanced-settings-modal.tsx
+++ b/packages/ui/src/components/advanced-settings-modal.tsx
@@ -4,6 +4,8 @@ import OpenCodeBinarySelector from "./opencode-binary-selector"
 import EnvironmentVariablesEditor from "./environment-variables-editor"
 import OllamaCloudSettings from "./settings/OllamaCloudSettings"
 import QwenCodeSettings from "./settings/QwenCodeSettings"
+import ZAISettings from "./settings/ZAISettings"
+import OpenCodeZenSettings from "./settings/OpenCodeZenSettings"
 
 interface AdvancedSettingsModalProps {
   open: boolean
@@ -27,41 +29,60 @@ const AdvancedSettingsModal: Component<AdvancedSettingsModalProps> = (props) =>
           <OllamaCloudSettings />
 
           <QwenCodeSettings />
+
+          <ZAISettings />
+
+          <OpenCodeZenSettings />
diff --git a/packages/ui/src/components/settings/OpenCodeZenSettings.tsx b/packages/ui/src/components/settings/OpenCodeZenSettings.tsx
new file mode 100644
index 0000000..245dafd
--- /dev/null
+++ b/packages/ui/src/components/settings/OpenCodeZenSettings.tsx
@@ -0,0 +1,222 @@
+import { Component, createSignal, onMount, For, Show } from 'solid-js'
+import { Zap, CheckCircle, XCircle, Loader, Sparkles } from 'lucide-solid'
+
+interface ZenModel {
+  id: string
+  name: string
+  family?: string
+  free: boolean
+  reasoning?: boolean
+  tool_call?: boolean
+  limit?: {
+    context: number
+    output: number
+  }
+}
+
+const OpenCodeZenSettings: Component = () => {
+  const [models, setModels] = createSignal<ZenModel[]>([])
+  const [isLoading, setIsLoading] = createSignal(true)
+  const [connectionStatus, setConnectionStatus] = createSignal<'idle' | 'testing' | 'connected' | 'failed'>('idle')
+  const [error, setError] = createSignal<string | null>(null)
+
+  // Load models on mount
+  onMount(async () => {
+    await loadModels()
+    await testConnection()
+  })
+
+  const loadModels = async () => {
+    setIsLoading(true)
+    try {
+      const response = await fetch('/api/opencode-zen/models')
+      if (response.ok) {
+        const data = await response.json()
+        setModels(data.models || [])
+        setError(null)
+      } else {
+        throw new Error('Failed to load models')
+      }
+    } catch (err) {
+      console.error('Failed to load OpenCode Zen models:', err)
+      setError('Failed to load models')
+    } finally {
+      setIsLoading(false)
+    }
+  }
+
+  const testConnection = async () => {
+    setConnectionStatus('testing')
+    try {
+      const response = await fetch('/api/opencode-zen/test')
+      if (response.ok) {
+        const data = await response.json()
+        setConnectionStatus(data.connected ? 'connected' : 'failed')
+      } else {
+        setConnectionStatus('failed')
+      }
+    } catch (err) {
+      setConnectionStatus('failed')
+    }
+  }
+
+  const formatNumber = (num: number): string => {
+    if (num >= 1000000) return `${(num / 1000000).toFixed(1)}M`
+    if (num >= 1000) return `${(num / 1000).toFixed(0)}K`
+    return num.toString()
+  }
+
+  return (
+    <div class="space-y-6">
+      {/* Header */}
+      <div class="flex items-center justify-between">
+        <div class="flex items-center gap-3">
+          <div class="p-2 rounded-lg bg-yellow-500/10">
+            <Zap class="w-5 h-5 text-yellow-500" />
+          </div>
+          <div>
+            <h3 class="text-lg font-semibold">OpenCode Zen</h3>
+            <p class="text-sm text-gray-500">Free AI models - No API key required!</p>
+          </div>
+        </div>
+
+        <div class="flex items-center gap-2 text-sm">
+          {connectionStatus() === 'testing' && (
+            <span class="flex items-center gap-1 text-gray-500">
+              <Loader class="w-4 h-4 animate-spin" />
+              Testing...
+            </span>
+          )}
+          {connectionStatus() === 'connected' && (
+            <span class="flex items-center gap-1 text-green-500">
+              <CheckCircle class="w-4 h-4" />
+              Connected
+            </span>
+          )}
+          {connectionStatus() === 'failed' && (
+            <span class="flex items-center gap-1 text-red-500">
+              <XCircle class="w-4 h-4" />
+              Offline
+            </span>
+          )}
+        </div>
+      </div>
+
+      {/* Info Banner */}
+      <div class="rounded-lg border border-green-500/30 bg-green-500/5 p-4">
+        <div class="flex items-start gap-3">
+          <Sparkles class="w-5 h-5 text-green-500 mt-0.5" />
+          <div>
+            <h4 class="font-medium">Free Models Available!</h4>
+            <p class="text-sm text-gray-500">
+              OpenCode Zen provides access to powerful AI models completely free of charge.
+              These models are ready to use immediately - no API keys or authentication required!
+            </p>
+          </div>
+        </div>
+      </div>
+
+      {/* Models Grid */}
+      <div>
+        <h4 class="font-medium mb-3">Available Free Models</h4>
+
+        <Show when={error()}>
+          <div class="text-sm text-red-500">{error()}</div>
+        </Show>
+
+        <Show when={isLoading()}>
+          <div class="flex items-center gap-2 text-sm text-gray-500">
+            <Loader class="w-4 h-4 animate-spin" />
+            Loading models...
+          </div>
+        </Show>
+
+        <Show when={!isLoading() && models().length > 0}>
+          <div class="grid gap-3 sm:grid-cols-2">
+            <For each={models()}>
+              {(model) => (
+                <div class="rounded-lg border border-gray-200 dark:border-gray-700 p-3">
+                  <div class="flex items-start justify-between">
+                    <div>
+                      <div class="font-medium">{model.name}</div>
+                      <div class="text-xs text-gray-500">{model.id}</div>
+                    </div>
+                    <span class="text-xs font-semibold text-green-500">FREE</span>
+                  </div>
+
+                  <div class="mt-2 flex flex-wrap gap-1 text-xs">
+                    {model.reasoning && (
+                      <span class="px-1.5 py-0.5 rounded bg-blue-500/10 text-blue-500">Reasoning</span>
+                    )}
+                    {model.tool_call && (
+                      <span class="px-1.5 py-0.5 rounded bg-purple-500/10 text-purple-500">Tool Use</span>
+                    )}
+                    {model.family && (
+                      <span class="px-1.5 py-0.5 rounded bg-gray-500/10 text-gray-500">{model.family}</span>
+                    )}
+                  </div>
+
+                  {model.limit && (
+                    <div class="mt-2 flex gap-3 text-xs text-gray-500">
+                      <span>Context: {formatNumber(model.limit.context)}</span>
+                      <span>Output: {formatNumber(model.limit.output)}</span>
+                    </div>
+                  )}
+                </div>
+              )}
+            </For>
+          </div>
+        </Show>
+
+        <Show when={!isLoading() && models().length === 0}>
+          <div class="text-sm text-gray-500">
+            <p>No free models available at this time.</p>
+            <button class="mt-2 text-blue-500 hover:underline" onClick={loadModels}>Retry</button>
+          </div>
+        </Show>
+      </div>
+
+      {/* Usage Info */}
+      <div>
+        <h4 class="font-medium mb-2">How to Use</h4>
+        <ul class="space-y-1 text-sm text-gray-500">
+          <li>• Select any Zen model from the model picker in chat</li>
+          <li>• No API key configuration needed - just start chatting!</li>
+          <li>• Models support streaming, reasoning, and tool use</li>
+          <li>• Rate limits may apply during high demand periods</li>
+        </ul>
+      </div>
+    </div>
+  )
+}
+
+export default OpenCodeZenSettings
diff --git a/packages/ui/src/components/settings/ZAISettings.tsx b/packages/ui/src/components/settings/ZAISettings.tsx
new file mode 100644
index 0000000..e2f3f36
--- /dev/null
+++ b/packages/ui/src/components/settings/ZAISettings.tsx
@@ -0,0 +1,249 @@
+import { Component, createSignal, onMount, Show } from 'solid-js'
+import toast from 'solid-toast'
+import { Button } from '@suid/material'
+import { Cpu, CheckCircle, XCircle, Loader, Key, ExternalLink } from 'lucide-solid'
+
+interface ZAIConfig {
+  enabled: boolean
+  apiKey?: string
+  endpoint?: string
+}
+
+const ZAISettings: Component = () => {
+  const [config, setConfig] = createSignal<ZAIConfig>({ enabled: false })
+  const [isLoading, setIsLoading] = createSignal(false)
+  const [isTesting, setIsTesting] = createSignal(false)
+  const [connectionStatus, setConnectionStatus] = createSignal<'idle' | 'testing' | 'connected' | 'failed'>('idle')
+  const [models, setModels] = createSignal<string[]>([])
+
+  // Load config on mount
+  onMount(async () => {
+    try {
+      const response = await fetch('/api/zai/config')
+      if (response.ok) {
+        const data = await response.json()
+        setConfig(data.config)
+      }
+    } catch (error) {
+      console.error('Failed to load Z.AI config:', error)
+    }
+  })
+
+  const handleConfigChange = (field: keyof ZAIConfig, value: any) => {
+    setConfig(prev => ({ ...prev, [field]: value }))
+    setConnectionStatus('idle')
+  }
+
+  const saveConfig = async () => {
+    setIsLoading(true)
+    try {
+      const response = await fetch('/api/zai/config', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify(config())
+      })
+
+      if (response.ok) {
+        toast.success('Z.AI configuration saved', {
+          duration: 3000,
+          icon: <CheckCircle class="w-4 h-4 text-green-500" />
+        })
+      } else {
+        throw new Error('Failed to save config')
+      }
+    } catch (error) {
+      toast.error('Failed to save Z.AI configuration', {
+        duration: 5000,
+        icon: <XCircle class="w-4 h-4 text-red-500" />
+      })
+    } finally {
+      setIsLoading(false)
+    }
+  }
+
+  const testConnection = async () => {
+    setIsTesting(true)
+    setConnectionStatus('testing')
+
+    try {
+      const response = await fetch('/api/zai/test', {
+        method: 'POST'
+      })
+
+      if (response.ok) {
+        const data = await response.json()
+        setConnectionStatus(data.connected ? 'connected' : 'failed')
+
+        if (data.connected) {
+          toast.success('Successfully connected to Z.AI', {
+            duration: 3000,
+            icon: <CheckCircle class="w-4 h-4 text-green-500" />
+          })
+
+          // Load models after successful connection
+          loadModels()
+        } else {
+          toast.error('Failed to connect to Z.AI', {
+            duration: 3000,
+            icon: <XCircle class="w-4 h-4 text-red-500" />
+          })
+        }
+      } else {
+        throw new Error('Connection test failed')
+      }
+    } catch (error) {
+      setConnectionStatus('failed')
+      toast.error('Connection test failed', {
+        duration: 3000,
+        icon: <XCircle class="w-4 h-4 text-red-500" />
+      })
+    } finally {
+      setIsTesting(false)
+    }
+  }
+
+  const loadModels = async () => {
+    try {
+      const response = await fetch('/api/zai/models')
+      if (response.ok) {
+        const data = await response.json()
+        setModels(data.models.map((m: any) => m.name))
+      }
+    } catch (error) {
+      console.error('Failed to load models:', error)
+    }
+  }
+
+  const getStatusIcon = () => {
+    switch (connectionStatus()) {
+      case 'testing':
+        return <Loader class="w-4 h-4 animate-spin text-gray-500" />
+      case 'connected':
+        return <CheckCircle class="w-4 h-4 text-green-500" />
+      case 'failed':
+        return <XCircle class="w-4 h-4 text-red-500" />
+      default:
+        return null
+    }
+  }
+
+  return (
+    <div class="space-y-6">
+      <div class="flex items-center gap-2">
+        <Cpu class="w-5 h-5 text-blue-500" />
+        <h3 class="text-lg font-semibold">Z.AI Integration</h3>
+      </div>
+
+      <div class="rounded-lg border border-gray-200 dark:border-gray-700 p-4">
+        <h4 class="font-medium">GLM Coding Plan</h4>
+        <p class="text-sm text-gray-500">
+          Z.AI provides access to Claude models through their GLM Coding Plan. Get your API key from the{' '}
+          <a
+            href="https://z.ai/manage-apikey"
+            target="_blank"
+            rel="noreferrer"
+            class="text-blue-500 hover:underline inline-flex items-center gap-1"
+          >
+            Z.AI Platform
+            <ExternalLink class="w-3 h-3" />
+          </a>
+        </p>
+      </div>
+
+      <div class="space-y-4">
+        {/* Enable/Disable Toggle */}
+        <div class="flex items-center gap-2">
+          <label class="text-sm font-medium">Enable Z.AI</label>
+          <input
+            type="checkbox"
+            checked={config().enabled}
+            onChange={(e) => handleConfigChange('enabled', e.target.checked)}
+            class="w-4 h-4"
+          />
+        </div>
+
+        {/* API Key */}
+        <div>
+          <label class="block text-sm font-medium mb-1">
+            <Key class="inline w-4 h-4 mr-1" />
+            API Key
+          </label>
+          <input
+            type="password"
+            value={config().apiKey || ''}
+            onInput={(e) => handleConfigChange('apiKey', e.target.value)}
+            class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500 bg-white dark:bg-gray-800"
+            disabled={!config().enabled}
+          />
+          <p class="text-xs text-gray-500 mt-1">
+            Get your key from z.ai/manage-apikey
+          </p>
+        </div>
+
+        {/* Endpoint */}
+        <div>
+          <label class="block text-sm font-medium mb-1">Endpoint</label>
+          <input
+            type="text"
+            value={config().endpoint || 'https://api.z.ai/api/anthropic'}
+            onInput={(e) => handleConfigChange('endpoint', e.target.value)}
+            class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500 bg-white dark:bg-gray-800"
+            disabled={!config().enabled}
+          />
+        </div>
+
+        {/* Test Connection */}
+        <div class="flex items-center gap-3">
+          <Button variant="outlined" onClick={testConnection} disabled={!config().enabled || isTesting()}>
+            {isTesting() ? 'Testing...' : 'Test Connection'}
+          </Button>
+          {getStatusIcon()}
+          <Show when={connectionStatus() === 'connected'}>
+            <span class="text-sm text-green-500">Connected successfully</span>
+          </Show>
+          <Show when={connectionStatus() === 'failed'}>
+            <span class="text-sm text-red-500">Connection failed</span>
+          </Show>
+        </div>
+
+        {/* Available Models */}
+        <Show when={models().length > 0}>
+          <div>
+            <label class="block text-sm font-medium mb-1">Available Models</label>
+            <div class="flex flex-wrap gap-2">
+              {models().map(model => (
+                <div class="px-2 py-1 rounded bg-gray-100 dark:bg-gray-800 text-xs">
+                  {model}
+                </div>
+              ))}
+            </div>
+          </div>
+        </Show>
+
+        {/* Save Configuration */}
+        <div>
+          <Button variant="contained" onClick={saveConfig} disabled={isLoading()}>
+            {isLoading() ? 'Saving...' : 'Save Configuration'}
+          </Button>
+        </div>
+      </div>
+    </div>
+  )
+}
+
+export default ZAISettings