Backup before continuing from Codex 5.2 session - User storage, compaction suggestions, streaming improvements

This commit is contained in:
Gemini AI
2025-12-24 21:27:05 +04:00
Unverified
parent f9748391a9
commit e8c38b0add
93 changed files with 10615 additions and 2037 deletions

View File

@@ -79,6 +79,37 @@ export type WorkspaceCreateResponse = WorkspaceDescriptor
export type WorkspaceListResponse = WorkspaceDescriptor[]
export type WorkspaceDetailResponse = WorkspaceDescriptor
/** Request body for exporting a workspace to a path on disk. */
export interface WorkspaceExportRequest {
destination: string // target path the workspace is exported to
includeConfig?: boolean // when true, the workspace config is exported as well
}
/** Response confirming where the export was written. */
export interface WorkspaceExportResponse {
destination: string // path the export actually landed at
}
/** Request body for importing a workspace from an existing path. */
export interface WorkspaceImportRequest {
source: string // path to import the workspace from
destination: string // path the imported workspace is placed at
includeConfig?: boolean // when true, config is imported alongside the workspace
}
/** Import returns the descriptor of the newly created workspace. */
export type WorkspaceImportResponse = WorkspaceDescriptor
/** Shape of a workspace-scoped MCP configuration file. */
export interface WorkspaceMcpConfig {
mcpServers?: Record<string, unknown> // server entries keyed by name; schema is opaque here
}
/** Read response for a workspace's MCP config, including file location/state. */
export interface WorkspaceMcpConfigResponse {
path: string // filesystem path of the MCP config file
exists: boolean // whether the file currently exists on disk
config: WorkspaceMcpConfig // parsed config (empty object semantics when missing — TODO confirm)
}
/** Write request replacing a workspace's MCP config. */
export interface WorkspaceMcpConfigRequest {
config: WorkspaceMcpConfig
}
export interface WorkspaceDeleteResponse {
id: string
status: WorkspaceStatus
@@ -159,6 +190,11 @@ export interface InstanceData {
agentModelSelections: AgentModelSelection
sessionTasks?: SessionTasks // Multi-task chat support: tasks per session
sessionSkills?: Record<string, SkillSelection[]> // Selected skills per session
customAgents?: Array<{
name: string
description?: string
prompt: string
}>
}
export type InstanceStreamStatus = "connecting" | "connected" | "error" | "disconnected"
@@ -269,6 +305,10 @@ export interface ServerMeta {
latestRelease?: LatestReleaseInfo
}
export interface PortAvailabilityResponse {
port: number
}
export type {
Preferences,
ModelPreference,

View File

@@ -16,6 +16,7 @@ import { ServerMeta } from "./api-types"
import { InstanceStore } from "./storage/instance-store"
import { InstanceEventBridge } from "./workspaces/instance-events"
import { createLogger } from "./logger"
import { getUserConfigPath } from "./user-data"
import { launchInBrowser } from "./launcher"
import { startReleaseMonitor } from "./releases/release-monitor"
@@ -41,7 +42,7 @@ interface CliOptions {
const DEFAULT_PORT = 9898
const DEFAULT_HOST = "127.0.0.1"
const DEFAULT_CONFIG_PATH = "~/.config/codenomad/config.json"
const DEFAULT_CONFIG_PATH = getUserConfigPath()
function parseCliOptions(argv: string[]): CliOptions {
const program = new Command()

View File

@@ -1,11 +1,5 @@
/**
* Ollama Cloud API Integration
* Provides access to Ollama's cloud models through API
*/
import { z } from "zod"
// Configuration schema for Ollama Cloud
export const OllamaCloudConfigSchema = z.object({
apiKey: z.string().optional(),
endpoint: z.string().default("https://ollama.com"),
@@ -14,31 +8,56 @@ export const OllamaCloudConfigSchema = z.object({
export type OllamaCloudConfig = z.infer<typeof OllamaCloudConfigSchema>
// Model information schema
// Schema is flexible since Ollama Cloud may return different fields than local Ollama
export const OllamaModelSchema = z.object({
name: z.string(),
size: z.string(),
digest: z.string(),
modified_at: z.string(),
created_at: z.string()
model: z.string().optional(), // Some APIs return model instead of name
size: z.union([z.string(), z.number()]).optional(),
digest: z.string().optional(),
modified_at: z.string().optional(),
created_at: z.string().optional(),
details: z.any().optional() // Model details like family, parameter_size, etc.
})
export type OllamaModel = z.infer<typeof OllamaModelSchema>
// Chat message schema
export const ChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system"]),
content: z.string(),
images: z.array(z.string()).optional()
images: z.array(z.string()).optional(),
tool_calls: z.array(z.any()).optional(),
thinking: z.string().optional()
})
export type ChatMessage = z.infer<typeof ChatMessageSchema>
// Chat request/response schemas
// A tool invocation emitted by the model: tool name plus free-form arguments.
// NOTE(review): arguments are z.record(z.any()) — intentionally loose; tighten
// per-tool at the call site if stricter validation is needed.
export const ToolCallSchema = z.object({
name: z.string(),
arguments: z.record(z.any())
})
export type ToolCall = z.infer<typeof ToolCallSchema>
// A tool the model may call, described with a JSON-Schema-like parameter block.
export const ToolDefinitionSchema = z.object({
name: z.string(),
description: z.string(),
parameters: z.object({
type: z.enum(["object", "string", "number", "boolean", "array"]),
properties: z.record(z.any()), // per-parameter schemas; not validated further here
required: z.array(z.string()).optional()
})
})
export type ToolDefinition = z.infer<typeof ToolDefinitionSchema>
export const ChatRequestSchema = z.object({
model: z.string(),
messages: z.array(ChatMessageSchema),
stream: z.boolean().default(false),
think: z.union([z.boolean(), z.enum(["low", "medium", "high"])]).optional(),
format: z.union([z.literal("json"), z.any()]).optional(),
tools: z.array(ToolDefinitionSchema).optional(),
web_search: z.boolean().optional(),
options: z.object({
temperature: z.number().min(0).max(2).optional(),
top_p: z.number().min(0).max(1).optional()
@@ -48,7 +67,10 @@ export const ChatRequestSchema = z.object({
export const ChatResponseSchema = z.object({
model: z.string(),
created_at: z.string(),
message: ChatMessageSchema,
message: ChatMessageSchema.extend({
thinking: z.string().optional(),
tool_calls: z.array(z.any()).optional()
}),
done: z.boolean().optional(),
total_duration: z.number().optional(),
load_duration: z.number().optional(),
@@ -61,23 +83,32 @@ export const ChatResponseSchema = z.object({
export type ChatRequest = z.infer<typeof ChatRequestSchema>
export type ChatResponse = z.infer<typeof ChatResponseSchema>
// Request for the embeddings endpoint: one text or a batch of texts.
export const EmbeddingRequestSchema = z.object({
model: z.string(),
input: z.union([z.string(), z.array(z.string())]) // single prompt or batch
})
export type EmbeddingRequest = z.infer<typeof EmbeddingRequestSchema>
// Response: one embedding vector per input, in input order (presumably — verify against API).
export const EmbeddingResponseSchema = z.object({
model: z.string(),
embeddings: z.array(z.array(z.number()))
})
export type EmbeddingResponse = z.infer<typeof EmbeddingResponseSchema>
export class OllamaCloudClient {
private config: OllamaCloudConfig
private baseUrl: string
constructor(config: OllamaCloudConfig) {
this.config = config
this.baseUrl = config.endpoint.replace(/\/$/, "") // Remove trailing slash
this.baseUrl = config.endpoint.replace(/\/$/, "")
}
/**
* Test connection to Ollama Cloud API
*/
async testConnection(): Promise<boolean> {
try {
const response = await this.makeRequest("/api/tags", {
method: "GET"
})
const response = await this.makeRequest("/tags", { method: "GET" })
return response.ok
} catch (error) {
console.error("Ollama Cloud connection test failed:", error)
@@ -85,30 +116,85 @@ export class OllamaCloudClient {
}
}
/**
* List available models
*/
async listModels(): Promise<OllamaModel[]> {
try {
const response = await this.makeRequest("/api/tags", {
method: "GET"
const headers: Record<string, string> = {}
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
const cloudResponse = await fetch(`${this.baseUrl}/v1/models`, {
method: "GET",
headers
})
if (cloudResponse.ok) {
const data = await cloudResponse.json()
const modelsArray = Array.isArray(data?.data) ? data.data : []
const parsedModels = modelsArray
.map((model: any) => ({
name: model.id || model.name || model.model,
model: model.id || model.model || model.name,
}))
.filter((model: any) => model.name)
if (parsedModels.length > 0) {
return parsedModels
}
}
const response = await this.makeRequest("/tags", { method: "GET" })
if (!response.ok) {
throw new Error(`Failed to fetch models: ${response.statusText}`)
const errorText = await response.text().catch(() => "Unknown error")
console.error(`[OllamaCloud] Failed to fetch models: ${response.status} ${response.statusText}`, errorText)
throw new Error(`Failed to fetch models: ${response.status} ${response.statusText} - ${errorText}`)
}
const data = await response.json()
return z.array(OllamaModelSchema).parse(data.models || [])
console.log("[OllamaCloud] Models response:", JSON.stringify(data).substring(0, 500))
// Handle different response formats flexibly
const modelsArray = Array.isArray(data.models) ? data.models :
Array.isArray(data) ? data : []
// Parse with flexible schema, don't throw on validation failure
// Only include cloud-compatible models (ending in -cloud or known cloud models)
const parsedModels: OllamaModel[] = []
for (const model of modelsArray) {
try {
const modelName = model.name || model.model || ""
// Filter to only cloud-compatible models
const isCloudModel = modelName.endsWith("-cloud") ||
modelName.includes(":cloud") ||
modelName.startsWith("gpt-oss") ||
modelName.startsWith("qwen3-coder") ||
modelName.startsWith("deepseek-v3")
if (modelName && isCloudModel) {
parsedModels.push({
name: modelName,
model: model.model || modelName,
size: model.size,
digest: model.digest,
modified_at: model.modified_at,
created_at: model.created_at,
details: model.details
})
}
} catch (parseError) {
console.warn("[OllamaCloud] Skipping model due to parse error:", model, parseError)
}
}
console.log(`[OllamaCloud] Parsed ${parsedModels.length} cloud-compatible models`)
return parsedModels
} catch (error) {
console.error("Failed to list Ollama Cloud models:", error)
throw error
}
}
/**
* Generate chat completion
*/
async chat(request: ChatRequest): Promise<AsyncIterable<ChatResponse>> {
if (!this.config.apiKey) {
throw new Error("Ollama Cloud API key is required")
@@ -118,20 +204,20 @@ export class OllamaCloudClient {
"Content-Type": "application/json"
}
// Add authorization header if API key is provided
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
try {
const response = await fetch(`${this.baseUrl}/api/chat`, {
const response = await this.makeRequest("/chat", {
method: "POST",
headers,
body: JSON.stringify(request)
})
if (!response.ok) {
throw new Error(`Chat request failed: ${response.statusText}`)
const errorText = await response.text()
throw new Error(`Chat request failed: ${response.statusText} - ${errorText}`)
}
if (request.stream) {
@@ -146,9 +232,85 @@ export class OllamaCloudClient {
}
}
/**
* Pull a model (for cloud models, this just makes them available)
*/
/**
 * Runs a chat completion with the model's extended "thinking" output enabled.
 * Delegates to chat() with `think: true` forced onto the request.
 */
async chatWithThinking(request: ChatRequest): Promise<AsyncIterable<ChatResponse>> {
  return this.chat({ ...request, think: true })
}
/**
 * Runs a chat completion whose output is constrained by the given format
 * schema (passed through as the request's `format` field).
 */
async chatWithStructuredOutput(request: ChatRequest, schema: any): Promise<AsyncIterable<ChatResponse>> {
  return this.chat({ ...request, format: schema })
}
/**
 * Runs a chat completion with the given images attached to the most recent
 * user message. If the conversation contains no user message the images are
 * silently dropped (matches prior behavior).
 * @throws Error when the request contains no messages at all.
 */
async chatWithVision(request: ChatRequest, images: string[]): Promise<AsyncIterable<ChatResponse>> {
  if (!request.messages.length) {
    throw new Error("At least one message is required")
  }
  // Attach images to a *copy* of the target message. The previous version
  // shallow-copied the array but then set `.images` on the shared message
  // object, mutating the caller's input.
  const lastUserIndex = request.messages.map(m => m.role).lastIndexOf("user")
  const messagesWithImages = request.messages.map((message, index) =>
    index === lastUserIndex ? { ...message, images } : message
  )
  return this.chat({ ...request, messages: messagesWithImages })
}
/**
 * Runs a chat completion with the supplied tool definitions made available
 * to the model via the request's `tools` field.
 */
async chatWithTools(request: ChatRequest, tools: ToolDefinition[]): Promise<AsyncIterable<ChatResponse>> {
  return this.chat({ ...request, tools })
}
/**
 * Runs a chat completion with web search enabled (`web_search: true` on the
 * outgoing request).
 */
async chatWithWebSearch(request: ChatRequest): Promise<AsyncIterable<ChatResponse>> {
  return this.chat({ ...request, web_search: true })
}
/**
 * Generates embeddings by POSTing to the /embed endpoint and validating the
 * payload against EmbeddingResponseSchema.
 * @throws Error when no API key is configured, the HTTP call fails, or the
 *         response does not match the expected schema.
 */
async generateEmbeddings(request: EmbeddingRequest): Promise<EmbeddingResponse> {
  if (!this.config.apiKey) {
    throw new Error("Ollama Cloud API key is required")
  }
  try {
    // makeRequest attaches the Authorization header itself, so only the
    // content type is set here. (The previous version re-added auth behind an
    // `if (this.config.apiKey)` guard that could never be false after the
    // mandatory check above.)
    const response = await this.makeRequest("/embed", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(request)
    })
    if (!response.ok) {
      // Include status and body for parity with the chat() error path.
      const errorText = await response.text().catch(() => "")
      throw new Error(`Embeddings request failed: ${response.status} ${response.statusText}${errorText ? ` - ${errorText}` : ""}`)
    }
    const data = await response.json()
    return EmbeddingResponseSchema.parse(data)
  } catch (error) {
    console.error("Ollama Cloud embeddings request failed:", error)
    throw error
  }
}
async pullModel(modelName: string): Promise<void> {
const headers: Record<string, string> = {
"Content-Type": "application/json"
@@ -158,7 +320,7 @@ export class OllamaCloudClient {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
const response = await fetch(`${this.baseUrl}/api/pull`, {
const response = await this.makeRequest("/pull", {
method: "POST",
headers,
body: JSON.stringify({ name: modelName })
@@ -169,9 +331,6 @@ export class OllamaCloudClient {
}
}
/**
* Parse streaming response
*/
private async *parseStreamingResponse(response: Response): AsyncIterable<ChatResponse> {
if (!response.body) {
throw new Error("Response body is missing")
@@ -186,18 +345,17 @@ export class OllamaCloudClient {
if (done) break
const lines = decoder.decode(value, { stream: true }).split('\n').filter(line => line.trim())
for (const line of lines) {
try {
const data = JSON.parse(line)
const chatResponse = ChatResponseSchema.parse(data)
yield chatResponse
if (chatResponse.done) {
return
}
} catch (parseError) {
// Skip invalid JSON lines
console.warn("Failed to parse streaming line:", line, parseError)
}
}
@@ -207,61 +365,72 @@ export class OllamaCloudClient {
}
}
/**
* Create async iterable from array
*/
private async *createAsyncIterable<T>(items: T[]): AsyncIterable<T> {
for (const item of items) {
yield item
}
}
/**
* Make authenticated request to API
*/
private async makeRequest(endpoint: string, options: RequestInit): Promise<Response> {
const url = `${this.baseUrl}${endpoint}`
// Ensure endpoint starts with /api
const apiEndpoint = endpoint.startsWith('/api') ? endpoint : `/api${endpoint}`
const url = `${this.baseUrl}${apiEndpoint}`
const headers: Record<string, string> = {
...options.headers as Record<string, string>
}
// Add authorization header if API key is provided
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
console.log(`[OllamaCloud] Making request to: ${url}`)
return fetch(url, {
...options,
headers
})
}
/**
 * Returns only the models whose names carry the "-cloud" suffix.
 */
async getCloudModels(): Promise<OllamaModel[]> {
  const models = await this.listModels()
  const isCloudModel = (model: OllamaModel) => model.name.endsWith("-cloud")
  return models.filter(isCloudModel)
}
/**
 * True when the key is a non-empty string. No format checks are applied —
 * this only rejects missing/empty values.
 */
static validateApiKey(apiKey: string): boolean {
  if (typeof apiKey !== "string") return false
  return apiKey.length > 0
}
/**
 * Convenience accessor: the names of all "-cloud" suffixed models.
 */
async getCloudModelNames(): Promise<string[]> {
  const cloudModels = await this.getCloudModels()
  return cloudModels.map(({ name }) => name)
}
async getThinkingCapableModels(): Promise<string[]> {
const allModels = await this.listModels()
const thinkingModelPatterns = ["qwen3", "deepseek-r1", "gpt-oss", "deepseek-v3.1"]
return allModels
.map(m => m.name)
.filter(name => thinkingModelPatterns.some(pattern => name.toLowerCase().includes(pattern)))
}
async getVisionCapableModels(): Promise<string[]> {
const allModels = await this.listModels()
const visionModelPatterns = ["gemma3", "llama3.2-vision", "llava", "bakllava", "minicpm-v"]
return allModels
.map(m => m.name)
.filter(name => visionModelPatterns.some(pattern => name.toLowerCase().includes(pattern)))
}
async getEmbeddingModels(): Promise<string[]> {
const allModels = await this.listModels()
const embeddingModelPatterns = ["embeddinggemma", "qwen3-embedding", "all-minilm", "nomic-embed", "mxbai-embed"]
return allModels
.map(m => m.name)
.filter(name => embeddingModelPatterns.some(pattern => name.toLowerCase().includes(pattern)))
}
}
// Default cloud models based on Ollama documentation
export const DEFAULT_CLOUD_MODELS = [
"gpt-oss:120b-cloud",
"llama3.1:70b-cloud",
@@ -270,4 +439,32 @@ export const DEFAULT_CLOUD_MODELS = [
"qwen2.5:7b-cloud"
] as const
export type CloudModelName = typeof DEFAULT_CLOUD_MODELS[number]
export type CloudModelName = typeof DEFAULT_CLOUD_MODELS[number]
// Model name patterns/ids treated as supporting extended "thinking" output.
export const THINKING_MODELS = [
"qwen3",
"deepseek-r1",
"deepseek-v3.1",
"gpt-oss:120b-cloud"
] as const
export type ThinkingModelName = typeof THINKING_MODELS[number]
// Model name patterns treated as accepting image inputs.
export const VISION_MODELS = [
"gemma3",
"llava",
"bakllava",
"minicpm-v"
] as const
export type VisionModelName = typeof VISION_MODELS[number]
// Model name patterns treated as embedding models.
export const EMBEDDING_MODELS = [
"embeddinggemma",
"qwen3-embedding",
"all-minilm",
"nomic-embed-text",
"mxbai-embed-large"
] as const
export type EmbeddingModelName = typeof EMBEDDING_MODELS[number]

View File

@@ -11,8 +11,8 @@ import { z } from "zod"
// Configuration schema for OpenCode Zen
export const OpenCodeZenConfigSchema = z.object({
enabled: z.boolean().default(true), // Free models enabled by default
endpoint: z.string().default("https://api.opencode.ai/v1"),
apiKey: z.string().default("public") // "public" key for free models
endpoint: z.string().default("https://opencode.ai/zen/v1"),
apiKey: z.string().optional()
})
export type OpenCodeZenConfig = z.infer<typeof OpenCodeZenConfigSchema>
@@ -104,10 +104,10 @@ export const FREE_ZEN_MODELS: ZenModel[] = [
attachment: false,
temperature: true,
cost: { input: 0, output: 0 },
limit: { context: 128000, output: 16384 }
limit: { context: 200000, output: 128000 }
},
{
id: "grok-code-fast-1",
id: "grok-code",
name: "Grok Code Fast 1",
family: "grok",
reasoning: true,
@@ -115,18 +115,29 @@ export const FREE_ZEN_MODELS: ZenModel[] = [
attachment: false,
temperature: true,
cost: { input: 0, output: 0 },
limit: { context: 256000, output: 10000 }
limit: { context: 256000, output: 256000 }
},
{
id: "minimax-m2.1",
name: "MiniMax M2.1",
family: "minimax",
id: "glm-4.7-free",
name: "GLM-4.7",
family: "glm-free",
reasoning: true,
tool_call: true,
attachment: false,
temperature: true,
cost: { input: 0, output: 0 },
limit: { context: 205000, output: 131072 }
limit: { context: 204800, output: 131072 }
},
{
id: "alpha-doubao-seed-code",
name: "Doubao Seed Code (alpha)",
family: "doubao",
reasoning: true,
tool_call: true,
attachment: false,
temperature: true,
cost: { input: 0, output: 0 },
limit: { context: 256000, output: 32000 }
}
]
@@ -217,13 +228,19 @@ export class OpenCodeZenClient {
* Chat completion (streaming)
*/
async *chatStream(request: ChatRequest): AsyncGenerator<ChatChunk> {
const headers: Record<string, string> = {
"Content-Type": "application/json",
"User-Agent": "NomadArch/1.0",
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "NomadArch"
}
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${this.config.apiKey}`,
"User-Agent": "NomadArch/1.0"
},
headers,
body: JSON.stringify({
...request,
stream: true
@@ -281,13 +298,19 @@ export class OpenCodeZenClient {
* Chat completion (non-streaming)
*/
async chat(request: ChatRequest): Promise<ChatChunk> {
const headers: Record<string, string> = {
"Content-Type": "application/json",
"User-Agent": "NomadArch/1.0",
"HTTP-Referer": "https://opencode.ai/",
"X-Title": "NomadArch"
}
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${this.config.apiKey}`,
"User-Agent": "NomadArch/1.0"
},
headers,
body: JSON.stringify({
...request,
stream: false
@@ -306,7 +329,6 @@ export class OpenCodeZenClient {
export function getDefaultZenConfig(): OpenCodeZenConfig {
return {
enabled: true,
endpoint: "https://api.opencode.ai/v1",
apiKey: "public"
endpoint: "https://opencode.ai/zen/v1"
}
}

View File

@@ -1,113 +1,111 @@
/**
* Z.AI API Integration
* Provides access to Z.AI's GLM Coding Plan API (Anthropic-compatible)
* Based on https://docs.z.ai/devpack/tool/claude#step-2-config-glm-coding-plan
*/
import { z } from "zod"
// Configuration schema for Z.AI
export const ZAIConfigSchema = z.object({
apiKey: z.string().optional(),
endpoint: z.string().default("https://api.z.ai/api/anthropic"),
endpoint: z.string().default("https://api.z.ai/api/paas/v4"),
enabled: z.boolean().default(false),
timeout: z.number().default(3000000) // 50 minutes as per docs
timeout: z.number().default(300000)
})
export type ZAIConfig = z.infer<typeof ZAIConfigSchema>
// Message schema (Anthropic-compatible)
export const ZAIMessageSchema = z.object({
role: z.enum(["user", "assistant"]),
role: z.enum(["user", "assistant", "system"]),
content: z.string()
})
export type ZAIMessage = z.infer<typeof ZAIMessageSchema>
// Chat request schema
export const ZAIChatRequestSchema = z.object({
model: z.string().default("claude-sonnet-4-20250514"),
model: z.string().default("glm-4.7"),
messages: z.array(ZAIMessageSchema),
max_tokens: z.number().default(8192),
stream: z.boolean().default(true),
system: z.string().optional()
temperature: z.number().optional(),
thinking: z.object({
type: z.enum(["enabled", "disabled"]).optional()
}).optional()
})
export type ZAIChatRequest = z.infer<typeof ZAIChatRequestSchema>
// Chat response schema
export const ZAIChatResponseSchema = z.object({
id: z.string(),
type: z.string(),
role: z.string(),
content: z.array(z.object({
type: z.string(),
text: z.string().optional()
})),
object: z.string(),
created: z.number(),
model: z.string(),
stop_reason: z.string().nullable().optional(),
stop_sequence: z.string().nullable().optional(),
choices: z.array(z.object({
index: z.number(),
message: z.object({
role: z.string(),
content: z.string().optional(),
reasoning_content: z.string().optional()
}),
finish_reason: z.string()
})),
usage: z.object({
input_tokens: z.number(),
output_tokens: z.number()
}).optional()
prompt_tokens: z.number(),
completion_tokens: z.number(),
total_tokens: z.number()
})
})
export type ZAIChatResponse = z.infer<typeof ZAIChatResponseSchema>
// Stream chunk schema
export const ZAIStreamChunkSchema = z.object({
type: z.string(),
index: z.number().optional(),
delta: z.object({
type: z.string().optional(),
text: z.string().optional()
}).optional(),
message: z.object({
id: z.string(),
type: z.string(),
role: z.string(),
content: z.array(z.any()),
model: z.string()
}).optional(),
content_block: z.object({
type: z.string(),
text: z.string()
}).optional()
id: z.string(),
object: z.string(),
created: z.number(),
model: z.string(),
choices: z.array(z.object({
index: z.number(),
delta: z.object({
role: z.string().optional(),
content: z.string().optional(),
reasoning_content: z.string().optional()
}),
finish_reason: z.string().nullable().optional()
}))
})
export type ZAIStreamChunk = z.infer<typeof ZAIStreamChunkSchema>
// GLM model ids available through the Z.AI API (see listModels()).
export const ZAI_MODELS = [
"glm-4.7",
"glm-4.6",
"glm-4.5",
"glm-4.5-air",
"glm-4.5-flash",
"glm-4.5-long"
] as const
export type ZAIModelName = typeof ZAI_MODELS[number]
export class ZAIClient {
private config: ZAIConfig
private baseUrl: string
constructor(config: ZAIConfig) {
this.config = config
this.baseUrl = config.endpoint.replace(/\/$/, "") // Remove trailing slash
this.baseUrl = config.endpoint.replace(/\/$/, "")
}
/**
* Test connection to Z.AI API
*/
async testConnection(): Promise<boolean> {
if (!this.config.apiKey) {
return false
}
try {
// Make a minimal request to test auth
const response = await fetch(`${this.baseUrl}/v1/messages`, {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
model: "claude-sonnet-4-20250514",
model: "glm-4.7",
max_tokens: 1,
messages: [{ role: "user", content: "test" }]
})
})
// Any response other than auth error means connection works
return response.status !== 401 && response.status !== 403
} catch (error) {
console.error("Z.AI connection test failed:", error)
@@ -115,28 +113,16 @@ export class ZAIClient {
}
}
/**
* List available models
*/
async listModels(): Promise<string[]> {
// Z.AI provides access to Claude models through their proxy
return [
"claude-sonnet-4-20250514",
"claude-3-5-sonnet-20241022",
"claude-3-opus-20240229",
"claude-3-haiku-20240307"
]
return [...ZAI_MODELS]
}
/**
* Chat completion (streaming)
*/
async *chatStream(request: ZAIChatRequest): AsyncGenerator<ZAIStreamChunk> {
if (!this.config.apiKey) {
throw new Error("Z.AI API key is required")
}
const response = await fetch(`${this.baseUrl}/v1/messages`, {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
@@ -165,7 +151,7 @@ export class ZAIClient {
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split("\n")
buffer = lines.pop() || "" // Keep incomplete line in buffer
buffer = lines.pop() || ""
for (const line of lines) {
if (line.startsWith("data: ")) {
@@ -176,7 +162,6 @@ export class ZAIClient {
const parsed = JSON.parse(data)
yield parsed as ZAIStreamChunk
} catch (e) {
// Skip invalid JSON
}
}
}
@@ -186,15 +171,12 @@ export class ZAIClient {
}
}
/**
* Chat completion (non-streaming)
*/
async chat(request: ZAIChatRequest): Promise<ZAIChatResponse> {
if (!this.config.apiKey) {
throw new Error("Z.AI API key is required")
}
const response = await fetch(`${this.baseUrl}/v1/messages`, {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
@@ -211,31 +193,14 @@ export class ZAIClient {
return await response.json()
}
/**
* Get request headers
*/
private getHeaders(): Record<string, string> {
return {
"Content-Type": "application/json",
"x-api-key": this.config.apiKey || "",
"anthropic-version": "2023-06-01"
"Authorization": `Bearer ${this.config.apiKey}`
}
}
/**
* Validate API key
*/
static validateApiKey(apiKey: string): boolean {
return typeof apiKey === "string" && apiKey.length > 0
}
}
// Default available models
export const ZAI_MODELS = [
"claude-sonnet-4-20250514",
"claude-3-5-sonnet-20241022",
"claude-3-opus-20240229",
"claude-3-haiku-20240307"
] as const
export type ZAIModelName = typeof ZAI_MODELS[number]
}

View File

@@ -3,6 +3,7 @@ import os from "os"
import path from "path"
import { fileURLToPath } from "url"
import { createLogger } from "./logger"
import { getOpencodeWorkspacesRoot, getUserDataRoot } from "./user-data"
const log = createLogger({ component: "opencode-config" })
const __filename = fileURLToPath(import.meta.url)
@@ -12,7 +13,8 @@ const prodTemplateDir = path.resolve(__dirname, "opencode-config")
const isDevBuild = Boolean(process.env.CODENOMAD_DEV ?? process.env.CLI_UI_DEV_SERVER) || existsSync(devTemplateDir)
const templateDir = isDevBuild ? devTemplateDir : prodTemplateDir
const userConfigDir = path.join(os.homedir(), ".config", "codenomad", "opencode-config")
const userConfigDir = path.join(getUserDataRoot(), "opencode-config")
const workspaceConfigRoot = getOpencodeWorkspacesRoot()
export function getOpencodeConfigDir(): string {
if (!existsSync(templateDir)) {
@@ -28,6 +30,28 @@ export function getOpencodeConfigDir(): string {
return userConfigDir
}
/**
 * Ensures a per-workspace copy of the Opencode config template exists and
 * returns its directory. An empty workspaceId falls back to the shared user
 * config dir; an existing workspace copy is reused without re-syncing.
 * NOTE(review): workspaceId is joined into a filesystem path unvalidated —
 * presumably ids never contain path separators; confirm at the call sites.
 * @throws Error when the bundled config template directory is missing.
 */
export function ensureWorkspaceOpencodeConfig(workspaceId: string): string {
  if (!workspaceId) return getOpencodeConfigDir()
  if (!existsSync(templateDir)) {
    throw new Error(`CodeNomad Opencode config template missing at ${templateDir}`)
  }
  const workspaceDir = path.join(workspaceConfigRoot, workspaceId)
  if (!existsSync(workspaceDir)) {
    mkdirSync(path.dirname(workspaceDir), { recursive: true })
    cpSync(templateDir, workspaceDir, { recursive: true })
  }
  return workspaceDir
}
/**
 * Path where a workspace's Opencode config copy lives. Does not create the
 * directory — it may not exist yet; use ensureWorkspaceOpencodeConfig for that.
 */
export function getWorkspaceOpencodeConfigDir(workspaceId: string): string {
  const workspaceDir = path.join(workspaceConfigRoot, workspaceId)
  return workspaceDir
}
function refreshUserConfig() {
log.debug({ templateDir, userConfigDir }, "Syncing Opencode config template")
rmSync(userConfigDir, { recursive: true, force: true })

View File

@@ -105,7 +105,11 @@ export function createHttpServer(deps: HttpServerDeps) {
},
})
registerWorkspaceRoutes(app, { workspaceManager: deps.workspaceManager })
registerWorkspaceRoutes(app, {
workspaceManager: deps.workspaceManager,
instanceStore: deps.instanceStore,
configStore: deps.configStore,
})
registerConfigRoutes(app, { configStore: deps.configStore, binaryRegistry: deps.binaryRegistry })
registerFilesystemRoutes(app, { fileSystemBrowser: deps.fileSystemBrowser })
registerMetaRoutes(app, { serverMeta: deps.serverMeta })
@@ -119,7 +123,7 @@ export function createHttpServer(deps: HttpServerDeps) {
registerQwenRoutes(app, { logger: deps.logger })
registerZAIRoutes(app, { logger: deps.logger })
registerOpenCodeZenRoutes(app, { logger: deps.logger })
await registerSkillsRoutes(app)
registerSkillsRoutes(app)
registerInstanceProxyRoutes(app, { workspaceManager: deps.workspaceManager, logger: proxyLogger })

View File

@@ -1,6 +1,7 @@
import { FastifyInstance } from "fastify"
import os from "os"
import { NetworkAddress, ServerMeta } from "../../api-types"
import { NetworkAddress, ServerMeta, PortAvailabilityResponse } from "../../api-types"
import { getAvailablePort } from "../../utils/port"
interface RouteDeps {
serverMeta: ServerMeta
@@ -8,6 +9,11 @@ interface RouteDeps {
/**
 * Registers read-only metadata routes: server meta and a free-port probe.
 */
export function registerMetaRoutes(app: FastifyInstance, deps: RouteDeps) {
  app.get("/api/meta", async () => buildMetaResponse(deps.serverMeta))
  app.get("/api/ports/available", async (): Promise<PortAvailabilityResponse> => {
    // 3000 is the starting candidate handed to the port finder.
    const port = await getAvailablePort(3000)
    return { port }
  })
}
function buildMetaResponse(meta: ServerMeta): ServerMeta {

View File

@@ -1,6 +1,18 @@
import { FastifyInstance, FastifyReply } from "fastify"
import { OllamaCloudClient, type OllamaCloudConfig, type ChatRequest } from "../../integrations/ollama-cloud"
import {
OllamaCloudClient,
type OllamaCloudConfig,
type ChatRequest,
type EmbeddingRequest,
type ToolDefinition
} from "../../integrations/ollama-cloud"
import { Logger } from "../../logger"
import fs from "fs"
import path from "path"
import { getUserIntegrationsDir } from "../../user-data"
const CONFIG_DIR = getUserIntegrationsDir()
const CONFIG_FILE = path.join(CONFIG_DIR, "ollama-config.json")
interface OllamaRouteDeps {
logger: Logger
@@ -12,7 +24,6 @@ export async function registerOllamaRoutes(
) {
const logger = deps.logger.child({ component: "ollama-routes" })
// Get Ollama Cloud configuration
app.get('/api/ollama/config', async (request, reply) => {
try {
const config = getOllamaConfig()
@@ -23,15 +34,16 @@ export async function registerOllamaRoutes(
}
})
// Update Ollama Cloud configuration
app.post('/api/ollama/config', {
schema: {
type: 'object',
required: ['enabled'],
properties: {
enabled: { type: 'boolean' },
apiKey: { type: 'string' },
endpoint: { type: 'string' }
body: {
type: 'object',
required: ['enabled'],
properties: {
enabled: { type: 'boolean' },
apiKey: { type: 'string' },
endpoint: { type: 'string' }
}
}
}
}, async (request, reply) => {
@@ -46,7 +58,6 @@ export async function registerOllamaRoutes(
}
})
// Test Ollama Cloud connection
app.post('/api/ollama/test', async (request, reply) => {
try {
const config = getOllamaConfig()
@@ -56,7 +67,7 @@ export async function registerOllamaRoutes(
const client = new OllamaCloudClient(config)
const isConnected = await client.testConnection()
return { connected: isConnected }
} catch (error) {
logger.error({ error }, "Ollama Cloud connection test failed")
@@ -64,7 +75,6 @@ export async function registerOllamaRoutes(
}
})
// List available models
app.get('/api/ollama/models', async (request, reply) => {
try {
const config = getOllamaConfig()
@@ -72,17 +82,19 @@ export async function registerOllamaRoutes(
return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
}
logger.info({ endpoint: config.endpoint, hasApiKey: !!config.apiKey }, "Fetching Ollama models")
const client = new OllamaCloudClient(config)
const models = await client.listModels()
logger.info({ modelCount: models.length }, "Ollama models fetched successfully")
return { models }
} catch (error) {
logger.error({ error }, "Failed to list Ollama models")
return reply.status(500).send({ error: "Failed to list models" })
} catch (error: any) {
logger.error({ error: error?.message || error }, "Failed to list Ollama models")
return reply.status(500).send({ error: error?.message || "Failed to list models" })
}
})
// Get cloud models only
app.get('/api/ollama/models/cloud', async (request, reply) => {
try {
const config = getOllamaConfig()
@@ -92,7 +104,7 @@ export async function registerOllamaRoutes(
const client = new OllamaCloudClient(config)
const cloudModels = await client.getCloudModels()
return { models: cloudModels }
} catch (error) {
logger.error({ error }, "Failed to list cloud models")
@@ -100,30 +112,86 @@ export async function registerOllamaRoutes(
}
})
// Chat completion endpoint
// Capability-filtered model listings (thinking / vision / embedding).
// The three routes were byte-for-byte duplicates apart from the capability;
// a single local helper registers each one. Responses/log/error strings are
// unchanged, so clients observe identical behavior.
const registerCapabilityModelsRoute = (
  routePath: string,
  label: string,
  fetchModels: (client: OllamaCloudClient) => Promise<unknown[]>,
) => {
  app.get(routePath, async (request, reply) => {
    try {
      const config = getOllamaConfig()
      if (!config.enabled) {
        return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
      }
      const client = new OllamaCloudClient(config)
      const models = await fetchModels(client)
      return { models }
    } catch (error) {
      logger.error({ error }, `Failed to list ${label} models`)
      return reply.status(500).send({ error: `Failed to list ${label} models` })
    }
  })
}

registerCapabilityModelsRoute('/api/ollama/models/thinking', 'thinking', (client) => client.getThinkingCapableModels())
registerCapabilityModelsRoute('/api/ollama/models/vision', 'vision', (client) => client.getVisionCapableModels())
registerCapabilityModelsRoute('/api/ollama/models/embedding', 'embedding', (client) => client.getEmbeddingModels())
app.post('/api/ollama/chat', {
schema: {
type: 'object',
required: ['model', 'messages'],
properties: {
model: { type: 'string' },
messages: {
type: 'array',
items: {
type: 'object',
required: ['role', 'content'],
properties: {
role: { type: 'string', enum: ['user', 'assistant', 'system'] },
content: { type: 'string' }
body: {
type: 'object',
required: ['model', 'messages'],
properties: {
model: { type: 'string' },
messages: {
type: 'array',
items: {
type: 'object',
required: ['role', 'content'],
properties: {
role: { type: 'string', enum: ['user', 'assistant', 'system'] },
content: { type: 'string' }
}
}
},
stream: { type: 'boolean' },
think: { type: ['boolean', 'string'] },
format: { type: ['string', 'object'] },
tools: { type: 'array' },
web_search: { type: 'boolean' },
options: {
type: 'object',
properties: {
temperature: { type: 'number', minimum: 0, maximum: 2 },
top_p: { type: 'number', minimum: 0, maximum: 1 }
}
}
},
stream: { type: 'boolean' },
options: {
type: 'object',
properties: {
temperature: { type: 'number', minimum: 0, maximum: 2 },
top_p: { type: 'number', minimum: 0, maximum: 1 }
}
}
}
@@ -137,8 +205,7 @@ export async function registerOllamaRoutes(
const client = new OllamaCloudClient(config)
const chatRequest = request.body as ChatRequest
// Set appropriate headers for streaming
if (chatRequest.stream) {
reply.raw.writeHead(200, {
'Content-Type': 'text/event-stream',
@@ -148,24 +215,31 @@ export async function registerOllamaRoutes(
try {
const stream = await client.chat(chatRequest)
for await (const chunk of stream) {
reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
if (chunk.done) {
reply.raw.write('data: [DONE]\n\n')
break
}
}
reply.raw.end()
} catch (streamError) {
logger.error({ error: streamError }, "Streaming failed")
} catch (streamError: any) {
logger.error({ error: streamError?.message || streamError }, "Ollama streaming failed")
// Send error event to client so it knows the request failed
reply.raw.write(`data: ${JSON.stringify({ error: streamError?.message || "Streaming failed" })}\n\n`)
reply.raw.write('data: [DONE]\n\n')
reply.raw.end()
}
} else {
const response = await client.chat(chatRequest)
return response
const stream = await client.chat(chatRequest)
const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}
return chunks[chunks.length - 1]
}
} catch (error) {
logger.error({ error }, "Ollama chat request failed")
@@ -173,13 +247,289 @@ export async function registerOllamaRoutes(
}
})
// Pull model endpoint
// Chat with thinking/reasoning output. `think` defaults to true so callers
// receive reasoning traces unless they explicitly opt out.
app.post('/api/ollama/chat/thinking', {
  schema: {
    body: {
      type: 'object',
      required: ['model', 'messages'],
      properties: {
        model: { type: 'string' },
        messages: { type: 'array' },
        stream: { type: 'boolean' },
        think: { type: ['boolean', 'string'] }
      }
    }
  }
}, async (request, reply) => {
  try {
    const config = getOllamaConfig()
    if (!config.enabled) {
      return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
    }
    const client = new OllamaCloudClient(config)
    const chatRequest = request.body as ChatRequest
    chatRequest.think = chatRequest.think ?? true
    if (chatRequest.stream) {
      // SSE streaming: write chunks directly to the raw response.
      reply.raw.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      })
      try {
        const stream = await client.chatWithThinking(chatRequest)
        for await (const chunk of stream) {
          reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
          if (chunk.done) {
            reply.raw.write('data: [DONE]\n\n')
            break
          }
        }
        reply.raw.end()
      } catch (streamError: any) {
        logger.error({ error: streamError?.message || streamError }, "Thinking streaming failed")
        // Fixed: previously the stream was ended silently, so the client could
        // not tell failure from completion. Mirror /api/ollama/chat and send
        // an explicit error event before terminating.
        reply.raw.write(`data: ${JSON.stringify({ error: streamError?.message || "Streaming failed" })}\n\n`)
        reply.raw.write('data: [DONE]\n\n')
        reply.raw.end()
      }
    } else {
      // Non-streaming: drain the iterator and return the final chunk.
      // NOTE(review): assumes the final chunk carries the complete message
      // when stream=false — confirm against OllamaCloudClient.chatWithThinking.
      const stream = await client.chatWithThinking(chatRequest)
      const chunks: any[] = []
      for await (const chunk of stream) {
        chunks.push(chunk)
      }
      return chunks[chunks.length - 1]
    }
  } catch (error) {
    logger.error({ error }, "Ollama thinking chat request failed")
    return reply.status(500).send({ error: "Thinking chat request failed" })
  }
})
app.post('/api/ollama/chat/vision', {
schema: {
body: {
type: 'object',
required: ['model', 'messages', 'images'],
properties: {
model: { type: 'string' },
messages: { type: 'array' },
images: { type: 'array', items: { type: 'string' } },
stream: { type: 'boolean' }
}
}
}
}, async (request, reply) => {
try {
const config = getOllamaConfig()
if (!config.enabled) {
return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
}
const client = new OllamaCloudClient(config)
const { model, messages, images, stream } = request.body as any
const chatRequest: ChatRequest = { model, messages, stream: stream ?? false }
if (chatRequest.stream) {
reply.raw.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
})
try {
const streamResult = await client.chatWithVision(chatRequest, images)
for await (const chunk of streamResult) {
reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
if (chunk.done) {
reply.raw.write('data: [DONE]\n\n')
break
}
}
reply.raw.end()
} catch (streamError) {
logger.error({ error: streamError }, "Vision streaming failed")
reply.raw.end()
}
} else {
const streamResult = await client.chatWithVision(chatRequest, images)
const chunks: any[] = []
for await (const chunk of streamResult) {
chunks.push(chunk)
}
return chunks[chunks.length - 1]
}
} catch (error) {
logger.error({ error }, "Ollama vision chat request failed")
return reply.status(500).send({ error: "Vision chat request failed" })
}
})
// Chat with tool/function-calling: forwards the caller's `tools` array to the
// client alongside the chat request.
app.post('/api/ollama/chat/tools', {
  schema: {
    body: {
      type: 'object',
      required: ['model', 'messages', 'tools'],
      properties: {
        model: { type: 'string' },
        messages: { type: 'array' },
        tools: { type: 'array' },
        stream: { type: 'boolean' }
      }
    }
  }
}, async (request, reply) => {
  try {
    const config = getOllamaConfig()
    if (!config.enabled) {
      return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
    }
    const client = new OllamaCloudClient(config)
    const { model, messages, tools, stream } = request.body as any
    const chatRequest: ChatRequest = { model, messages, stream: stream ?? false }
    if (chatRequest.stream) {
      reply.raw.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      })
      try {
        const streamResult = await client.chatWithTools(chatRequest, tools)
        for await (const chunk of streamResult) {
          reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
          if (chunk.done) {
            reply.raw.write('data: [DONE]\n\n')
            break
          }
        }
        reply.raw.end()
      } catch (streamError: any) {
        logger.error({ error: streamError?.message || streamError }, "Tools streaming failed")
        // Fixed: surface the failure to the client (consistent with
        // /api/ollama/chat) instead of silently ending the stream.
        reply.raw.write(`data: ${JSON.stringify({ error: streamError?.message || "Streaming failed" })}\n\n`)
        reply.raw.write('data: [DONE]\n\n')
        reply.raw.end()
      }
    } else {
      // Non-streaming: drain the iterator and return the final chunk.
      const streamResult = await client.chatWithTools(chatRequest, tools)
      const chunks: any[] = []
      for await (const chunk of streamResult) {
        chunks.push(chunk)
      }
      return chunks[chunks.length - 1]
    }
  } catch (error) {
    logger.error({ error }, "Ollama tools chat request failed")
    return reply.status(500).send({ error: "Tools chat request failed" })
  }
})
// Chat with web search enabled via the client's chatWithWebSearch wrapper.
app.post('/api/ollama/chat/websearch', {
  schema: {
    body: {
      type: 'object',
      required: ['model', 'messages'],
      properties: {
        model: { type: 'string' },
        messages: { type: 'array' },
        stream: { type: 'boolean' }
      }
    }
  }
}, async (request, reply) => {
  try {
    const config = getOllamaConfig()
    if (!config.enabled) {
      return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
    }
    const client = new OllamaCloudClient(config)
    const chatRequest = request.body as ChatRequest
    if (chatRequest.stream) {
      reply.raw.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      })
      try {
        const stream = await client.chatWithWebSearch(chatRequest)
        for await (const chunk of stream) {
          reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
          if (chunk.done) {
            reply.raw.write('data: [DONE]\n\n')
            break
          }
        }
        reply.raw.end()
      } catch (streamError: any) {
        logger.error({ error: streamError?.message || streamError }, "Web search streaming failed")
        // Fixed: surface the failure to the client (consistent with
        // /api/ollama/chat) instead of silently ending the stream.
        reply.raw.write(`data: ${JSON.stringify({ error: streamError?.message || "Streaming failed" })}\n\n`)
        reply.raw.write('data: [DONE]\n\n')
        reply.raw.end()
      }
    } else {
      // Non-streaming: drain the iterator and return the final chunk.
      const stream = await client.chatWithWebSearch(chatRequest)
      const chunks: any[] = []
      for await (const chunk of stream) {
        chunks.push(chunk)
      }
      return chunks[chunks.length - 1]
    }
  } catch (error) {
    logger.error({ error }, "Ollama web search chat request failed")
    return reply.status(500).send({ error: "Web search chat request failed" })
  }
})
// Generate embeddings for a single string or a batch of strings.
// Returns the client's embedding result verbatim; 400 when the Ollama Cloud
// integration is disabled, 500 on any upstream failure.
const embeddingsSchema = {
  body: {
    type: 'object',
    required: ['model', 'input'],
    properties: {
      model: { type: 'string' },
      input: { oneOf: [{ type: 'string' }, { type: 'array', items: { type: 'string' } }] }
    }
  }
}
app.post('/api/ollama/embeddings', { schema: embeddingsSchema }, async (request, reply) => {
  try {
    const config = getOllamaConfig()
    if (!config.enabled) {
      return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
    }
    const embedRequest = request.body as EmbeddingRequest
    const client = new OllamaCloudClient(config)
    return await client.generateEmbeddings(embedRequest)
  } catch (error) {
    logger.error({ error }, "Ollama embeddings request failed")
    return reply.status(500).send({ error: "Embeddings request failed" })
  }
})
app.post('/api/ollama/pull', {
schema: {
type: 'object',
required: ['model'],
properties: {
model: { type: 'string' }
body: {
type: 'object',
required: ['model'],
properties: {
model: { type: 'string' }
}
}
}
}, async (request, reply) => {
@@ -191,12 +541,11 @@ export async function registerOllamaRoutes(
const client = new OllamaCloudClient(config)
const { model } = request.body as any
// Start async pull operation
client.pullModel(model).catch(error => {
logger.error({ error, model }, "Failed to pull model")
})
return { message: `Started pulling model: ${model}` }
} catch (error) {
logger.error({ error }, "Failed to initiate model pull")
@@ -207,18 +556,36 @@ export async function registerOllamaRoutes(
logger.info("Ollama Cloud routes registered")
}
// Configuration management functions
function getOllamaConfig(): OllamaCloudConfig {
try {
const stored = localStorage.getItem('ollama_cloud_config')
return stored ? JSON.parse(stored) : { enabled: false, endpoint: "https://ollama.com" }
if (!fs.existsSync(CONFIG_FILE)) {
return { enabled: false, endpoint: "https://ollama.com" }
}
const data = fs.readFileSync(CONFIG_FILE, 'utf-8')
return JSON.parse(data)
} catch {
return { enabled: false, endpoint: "https://ollama.com" }
}
}
function updateOllamaConfig(config: Partial<OllamaCloudConfig>): void {
const current = getOllamaConfig()
const updated = { ...current, ...config }
localStorage.setItem('ollama_cloud_config', JSON.stringify(updated))
}
try {
if (!fs.existsSync(CONFIG_DIR)) {
fs.mkdirSync(CONFIG_DIR, { recursive: true })
}
const current = getOllamaConfig()
// Only update apiKey if a new non-empty value is provided
const updated = {
...current,
...config,
// Preserve existing apiKey if new one is undefined/empty
apiKey: config.apiKey || current.apiKey
}
fs.writeFileSync(CONFIG_FILE, JSON.stringify(updated, null, 2))
console.log(`[Ollama] Config saved: enabled=${updated.enabled}, endpoint=${updated.endpoint}, hasApiKey=${!!updated.apiKey}`)
} catch (error) {
console.error("Failed to save Ollama config:", error)
}
}

View File

@@ -5,97 +5,168 @@ interface QwenRouteDeps {
logger: Logger
}
const QWEN_OAUTH_BASE_URL = 'https://chat.qwen.ai'
const QWEN_OAUTH_DEVICE_CODE_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/device/code`
const QWEN_OAUTH_TOKEN_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/token`
const QWEN_OAUTH_CLIENT_ID = 'f0304373b74a44d2b584a3fb70ca9e56'
const QWEN_OAUTH_SCOPE = 'openid profile email model.completion'
const QWEN_OAUTH_DEVICE_GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:device_code'
const QWEN_DEFAULT_RESOURCE_URL = 'https://dashscope.aliyuncs.com/compatible-mode'
/**
 * Map an arbitrary model identifier onto one of Qwen's two proxy model slots.
 * Any value mentioning "vision" (case-insensitive) routes to "vision-model";
 * everything else — including empty/undefined input — falls back to
 * "coder-model".
 */
function normalizeQwenModel(model?: string): string {
  const normalized = (model ?? "").trim().toLowerCase()
  return normalized.includes("vision") ? "vision-model" : "coder-model"
}
/**
 * Normalize a Qwen OAuth resource URL into an OpenAI-compatible API base.
 * - Falls back to the DashScope compatible-mode endpoint when absent/blank.
 * - Prepends https:// when no scheme is present. (Fixed: previously any host
 *   merely *starting* with "http" — e.g. "httpd.internal" — was treated as
 *   already carrying a scheme, producing a scheme-less URL.)
 * - Strips one trailing slash and guarantees a "/v1" suffix.
 */
function normalizeQwenResourceUrl(resourceUrl?: string): string {
  const raw = typeof resourceUrl === 'string' && resourceUrl.trim().length > 0
    ? resourceUrl.trim()
    : QWEN_DEFAULT_RESOURCE_URL
  // Only an explicit http(s) scheme counts; bare hosts get https://.
  const withProtocol = /^https?:\/\//i.test(raw) ? raw : `https://${raw}`
  const trimmed = withProtocol.replace(/\/$/, '')
  return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`
}
export async function registerQwenRoutes(
app: FastifyInstance,
deps: QwenRouteDeps
) {
const logger = deps.logger.child({ component: "qwen-routes" })
// Get OAuth URL for Qwen authentication
app.get('/api/qwen/oauth/url', async (request, reply) => {
try {
const { clientId, redirectUri } = request.query as any
if (!clientId) {
return reply.status(400).send({ error: "Client ID is required" })
}
const authUrl = new URL('https://qwen.ai/oauth/authorize')
authUrl.searchParams.set('response_type', 'code')
authUrl.searchParams.set('client_id', clientId)
authUrl.searchParams.set('redirect_uri', redirectUri || `${request.protocol}//${request.host}/auth/qwen/callback`)
authUrl.searchParams.set('scope', 'read write')
authUrl.searchParams.set('state', generateState())
return { authUrl: authUrl.toString() }
} catch (error) {
logger.error({ error }, "Failed to generate OAuth URL")
return reply.status(500).send({ error: "Failed to generate OAuth URL" })
}
})
// Exchange authorization code for token
app.post('/api/qwen/oauth/exchange', {
// Qwen OAuth Device Flow: request device authorization
app.post('/api/qwen/oauth/device', {
schema: {
type: 'object',
required: ['code', 'state'],
properties: {
code: { type: 'string' },
state: { type: 'string' },
client_id: { type: 'string' },
redirect_uri: { type: 'string' }
body: {
type: 'object',
required: ['code_challenge', 'code_challenge_method'],
properties: {
code_challenge: { type: 'string' },
code_challenge_method: { type: 'string' }
}
}
}
}, async (request, reply) => {
try {
const { code, state, client_id, redirect_uri } = request.body as any
// Exchange code for token with Qwen
const tokenResponse = await fetch('https://qwen.ai/oauth/token', {
const { code_challenge, code_challenge_method } = request.body as any
const response = await fetch(QWEN_OAUTH_DEVICE_CODE_ENDPOINT, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
},
body: new URLSearchParams({
grant_type: 'authorization_code',
client_id: client_id,
code,
redirect_uri: redirect_uri
client_id: QWEN_OAUTH_CLIENT_ID,
scope: QWEN_OAUTH_SCOPE,
code_challenge,
code_challenge_method
})
})
if (!tokenResponse.ok) {
throw new Error(`Token exchange failed: ${tokenResponse.statusText}`)
if (!response.ok) {
const errorText = await response.text()
logger.error({ status: response.status, errorText }, "Qwen device authorization failed")
return reply.status(response.status).send({ error: "Device authorization failed", details: errorText })
}
const tokenData = await tokenResponse.json()
// Get user info
const userResponse = await fetch('https://qwen.ai/api/user', {
headers: {
'Authorization': `Bearer ${tokenData.access_token}`
const data = await response.json()
return { ...data }
} catch (error) {
logger.error({ error }, "Failed to request Qwen device authorization")
return reply.status(500).send({ error: "Device authorization failed" })
}
})
// Qwen OAuth Device Flow: poll token endpoint
app.post('/api/qwen/oauth/token', {
schema: {
body: {
type: 'object',
required: ['device_code', 'code_verifier'],
properties: {
device_code: { type: 'string' },
code_verifier: { type: 'string' }
}
}
}
}, async (request, reply) => {
try {
const { device_code, code_verifier } = request.body as any
const response = await fetch(QWEN_OAUTH_TOKEN_ENDPOINT, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
},
body: new URLSearchParams({
grant_type: QWEN_OAUTH_DEVICE_GRANT_TYPE,
client_id: QWEN_OAUTH_CLIENT_ID,
device_code,
code_verifier
})
})
if (!userResponse.ok) {
throw new Error(`Failed to fetch user info: ${userResponse.statusText}`)
const responseText = await response.text()
if (!response.ok) {
logger.error({ status: response.status, responseText }, "Qwen device token poll failed")
return reply.status(response.status).send(responseText)
}
const userData = await userResponse.json()
return {
success: true,
user: userData,
token: {
access_token: tokenData.access_token,
token_type: tokenData.token_type,
expires_in: tokenData.expires_in,
scope: tokenData.scope
}
try {
return reply.send(JSON.parse(responseText))
} catch {
return reply.send(responseText)
}
} catch (error) {
logger.error({ error }, "Qwen OAuth token exchange failed")
return reply.status(500).send({ error: "OAuth exchange failed" })
logger.error({ error }, "Failed to poll Qwen token endpoint")
return reply.status(500).send({ error: "Token polling failed" })
}
})
// Qwen OAuth Device Flow: exchange a refresh_token for a fresh access token.
// The upstream token endpoint's body is proxied verbatim (JSON when it
// parses, raw text otherwise) so clients see exact OAuth error payloads.
const refreshTokenSchema = {
  body: {
    type: 'object',
    required: ['refresh_token'],
    properties: {
      refresh_token: { type: 'string' }
    }
  }
}
app.post('/api/qwen/oauth/refresh', { schema: refreshTokenSchema }, async (request, reply) => {
  try {
    const { refresh_token } = request.body as any
    const upstream = await fetch(QWEN_OAUTH_TOKEN_ENDPOINT, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json'
      },
      body: new URLSearchParams({
        grant_type: 'refresh_token',
        refresh_token,
        client_id: QWEN_OAUTH_CLIENT_ID
      })
    })
    const responseText = await upstream.text()
    if (!upstream.ok) {
      logger.error({ status: upstream.status, responseText }, "Qwen token refresh failed")
      return reply.status(upstream.status).send(responseText)
    }
    try {
      return reply.send(JSON.parse(responseText))
    } catch {
      return reply.send(responseText)
    }
  } catch (error) {
    logger.error({ error }, "Failed to refresh Qwen token")
    return reply.status(500).send({ error: "Token refresh failed" })
  }
})
@@ -108,7 +179,7 @@ export async function registerQwenRoutes(
}
const token = authHeader.substring(7)
const userResponse = await fetch('https://qwen.ai/api/user', {
const userResponse = await fetch('https://chat.qwen.ai/api/v1/user', {
headers: {
'Authorization': `Bearer ${token}`
}
@@ -126,9 +197,121 @@ export async function registerQwenRoutes(
}
})
// Qwen Chat API - proxy chat requests to Qwen using OAuth token.
// The caller's bearer token is forwarded to the DashScope-compatible endpoint
// derived from resource_url; streaming responses are piped through as SSE.
app.post('/api/qwen/chat', {
  schema: {
    body: {
      type: 'object',
      required: ['model', 'messages'],
      properties: {
        model: { type: 'string' },
        messages: { type: 'array' },
        stream: { type: 'boolean' },
        resource_url: { type: 'string' }
      }
    }
  }
}, async (request, reply) => {
  try {
    const authHeader = request.headers.authorization
    if (!authHeader || !authHeader.startsWith('Bearer ')) {
      return reply.status(401).send({ error: "Authorization required" })
    }
    const accessToken = authHeader.substring(7)
    const { model, messages, stream, resource_url } = request.body as any
    // Use resource_url from OAuth credentials to target the DashScope-compatible API
    const apiBaseUrl = normalizeQwenResourceUrl(resource_url)
    const normalizedModel = normalizeQwenModel(model)
    const chatUrl = `${apiBaseUrl}/chat/completions`
    logger.info({ chatUrl, model: normalizedModel, messageCount: messages?.length }, "Proxying Qwen chat request")
    const response = await fetch(chatUrl, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${accessToken}`,
        'Accept': stream ? 'text/event-stream' : 'application/json'
      },
      body: JSON.stringify({
        model: normalizedModel,
        messages,
        stream: stream || false
      })
    })
    if (!response.ok) {
      const errorText = await response.text()
      logger.error({ status: response.status, errorText }, "Qwen chat request failed")
      return reply.status(response.status).send({ error: "Chat request failed", details: errorText })
    }
    if (stream && response.body) {
      // Pipe the upstream SSE bytes straight through to the client.
      reply.raw.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      })
      const reader = response.body.getReader()
      const decoder = new TextDecoder()
      try {
        while (true) {
          const { done, value } = await reader.read()
          if (done) break
          const chunk = decoder.decode(value, { stream: true })
          reply.raw.write(chunk)
        }
      } catch (streamError: any) {
        // Fixed: a mid-stream read failure previously escaped to the outer
        // catch, which then called reply.status(500) after headers had
        // already been sent. Log and terminate the stream here instead.
        logger.error({ error: streamError?.message || streamError }, "Qwen chat streaming failed")
      } finally {
        reader.releaseLock()
        reply.raw.end()
      }
    } else {
      const data = await response.json()
      return reply.send(data)
    }
  } catch (error) {
    logger.error({ error }, "Qwen chat proxy failed")
    return reply.status(500).send({ error: "Chat request failed" })
  }
})
// Qwen Models list endpoint: proxy the upstream model catalogue using the
// caller's bearer token. The resource_url query parameter may override the
// default API host.
app.get('/api/qwen/models', async (request, reply) => {
  try {
    const auth = request.headers.authorization
    if (!auth?.startsWith('Bearer ')) {
      return reply.status(401).send({ error: "Authorization required" })
    }
    const accessToken = auth.substring(7)
    const baseUrl = (request.query as any).resource_url || 'https://chat.qwen.ai'
    const upstream = await fetch(`${baseUrl}/api/v1/models`, {
      headers: {
        'Authorization': `Bearer ${accessToken}`,
        'Accept': 'application/json'
      }
    })
    if (!upstream.ok) {
      const errorText = await upstream.text()
      logger.error({ status: upstream.status, errorText }, "Qwen models request failed")
      return reply.status(upstream.status).send({ error: "Models request failed", details: errorText })
    }
    const payload = await upstream.json()
    return reply.send(payload)
  } catch (error) {
    logger.error({ error }, "Qwen models request failed")
    return reply.status(500).send({ error: "Models request failed" })
  }
})
logger.info("Qwen OAuth routes registered")
}
/**
 * Produce an opaque state string (random base36 + base36 timestamp) for
 * OAuth round-trips.
 * NOTE(review): Math.random() is not cryptographically secure; if this state
 * value is relied upon for CSRF protection, switch to crypto.randomBytes or
 * crypto.randomUUID — confirm how callers use it.
 */
function generateState(): string {
  const randomPart = Math.random().toString(36).substring(2, 15)
  const timePart = Date.now().toString(36)
  return `${randomPart}${timePart}`
}

View File

@@ -24,12 +24,29 @@ const InstanceDataSchema = z.object({
messageHistory: z.array(z.string()).default([]),
agentModelSelections: z.record(z.string(), ModelPreferenceSchema).default({}),
sessionTasks: z.record(z.string(), z.array(TaskSchema)).optional(),
sessionSkills: z
.record(
z.string(),
z.array(z.object({ id: z.string(), name: z.string(), description: z.string().optional() })),
)
.optional(),
customAgents: z
.array(
z.object({
name: z.string(),
description: z.string().optional(),
prompt: z.string(),
}),
)
.optional(),
})
// Baseline instance-data payload used when a workspace has no stored data:
// every collection starts empty. Keep field set in sync with
// InstanceDataSchema above.
const EMPTY_INSTANCE_DATA: InstanceData = {
messageHistory: [],
agentModelSelections: {},
sessionTasks: {},
sessionSkills: {},
customAgents: [],
}
export function registerStorageRoutes(app: FastifyInstance, deps: RouteDeps) {

View File

@@ -1,10 +1,18 @@
import { FastifyInstance, FastifyReply } from "fastify"
import { spawnSync } from "child_process"
import { z } from "zod"
import { existsSync, mkdirSync } from "fs"
import { cp, readFile, writeFile } from "fs/promises"
import path from "path"
import { WorkspaceManager } from "../../workspaces/manager"
import { InstanceStore } from "../../storage/instance-store"
import { ConfigStore } from "../../config/store"
import { getWorkspaceOpencodeConfigDir } from "../../opencode-config"
interface RouteDeps {
workspaceManager: WorkspaceManager
instanceStore: InstanceStore
configStore: ConfigStore
}
const WorkspaceCreateSchema = z.object({
@@ -163,6 +171,143 @@ export function registerWorkspaceRoutes(app: FastifyInstance, deps: RouteDeps) {
return { isRepo: true, branch, ahead, behind, changes }
})
// Export a workspace (files + instance data + opencode config, optionally the
// user-level config) into a timestamped folder under `destination`.
// NOTE(review): `destination` is a client-supplied filesystem path — anything
// the server process can write to is reachable. Confirm this endpoint is only
// exposed to trusted local clients.
app.post<{
Params: { id: string }
Body: { destination: string; includeConfig?: boolean }
}>("/api/workspaces/:id/export", async (request, reply) => {
const workspace = deps.workspaceManager.get(request.params.id)
if (!workspace) {
reply.code(404)
return { error: "Workspace not found" }
}
const payload = request.body ?? { destination: "" }
const destination = payload.destination?.trim()
if (!destination) {
reply.code(400)
return { error: "Destination is required" }
}
// Unique export folder: nomadarch-export-<workspace-basename>-<timestamp>.
const exportRoot = path.join(destination, `nomadarch-export-${path.basename(workspace.path)}-${Date.now()}`)
mkdirSync(exportRoot, { recursive: true })
// 1) Copy the workspace tree itself.
const workspaceTarget = path.join(exportRoot, "workspace")
await cp(workspace.path, workspaceTarget, { recursive: true, force: true })
// 2) Snapshot per-instance data (message history, model selections, ...).
const instanceData = await deps.instanceStore.read(workspace.path)
await writeFile(path.join(exportRoot, "instance-data.json"), JSON.stringify(instanceData, null, 2), "utf-8")
// 3) Copy the workspace-scoped opencode config directory when present.
const configDir = getWorkspaceOpencodeConfigDir(workspace.id)
if (existsSync(configDir)) {
await cp(configDir, path.join(exportRoot, "opencode-config"), { recursive: true, force: true })
}
// 4) Optionally include the user-level configuration snapshot.
if (payload.includeConfig) {
const config = deps.configStore.get()
await writeFile(path.join(exportRoot, "user-config.json"), JSON.stringify(config, null, 2), "utf-8")
}
// 5) Metadata so the import endpoint can reconstruct provenance.
const metadata = {
exportedAt: new Date().toISOString(),
workspacePath: workspace.path,
workspaceId: workspace.id,
}
await writeFile(path.join(exportRoot, "metadata.json"), JSON.stringify(metadata, null, 2), "utf-8")
return { destination: exportRoot }
})
// Fetch the workspace-scoped MCP server configuration from
// <workspace>/.mcp.json. A missing file is not an error: the route responds
// with exists=false and an empty server map.
app.get<{ Params: { id: string } }>("/api/workspaces/:id/mcp-config", async (request, reply) => {
  const ws = deps.workspaceManager.get(request.params.id)
  if (!ws) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  const mcpPath = path.join(ws.path, ".mcp.json")
  if (!existsSync(mcpPath)) {
    return { path: mcpPath, exists: false, config: { mcpServers: {} } }
  }
  try {
    const contents = await readFile(mcpPath, "utf-8")
    const config = contents ? JSON.parse(contents) : {}
    return { path: mcpPath, exists: true, config }
  } catch (error) {
    request.log.error({ err: error }, "Failed to read MCP config")
    reply.code(500)
    return { error: "Failed to read MCP config" }
  }
})
// Persist the workspace-level MCP server configuration to
// <workspace>/.mcp.json (pretty-printed JSON).
app.put<{ Params: { id: string } }>("/api/workspaces/:id/mcp-config", async (request, reply) => {
  const workspace = deps.workspaceManager.get(request.params.id)
  if (!workspace) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  const body = request.body as { config?: unknown }
  // Fixed: `typeof [] === "object"`, so the old check let arrays through even
  // though WorkspaceMcpConfig is a plain object keyed by server name.
  if (!body || typeof body.config !== "object" || body.config === null || Array.isArray(body.config)) {
    reply.code(400)
    return { error: "Invalid MCP config payload" }
  }
  const configPath = path.join(workspace.path, ".mcp.json")
  try {
    await writeFile(configPath, JSON.stringify(body.config, null, 2), "utf-8")
    return { path: configPath, exists: true, config: body.config }
  } catch (error) {
    request.log.error({ err: error }, "Failed to write MCP config")
    reply.code(500)
    return { error: "Failed to write MCP config" }
  }
})
// Import a previously exported workspace: copy the exported tree into
// `destination`, register it as a new workspace, then restore instance data,
// opencode config and (optionally) the user-level config.
// NOTE(review): `source`/`destination` are client-supplied filesystem paths —
// same trust assumption as the export endpoint; confirm exposure is local-only.
app.post<{
Body: { source: string; destination: string; includeConfig?: boolean }
}>("/api/workspaces/import", async (request, reply) => {
const payload = request.body ?? { source: "", destination: "" }
const source = payload.source?.trim()
const destination = payload.destination?.trim()
if (!source || !destination) {
reply.code(400)
return { error: "Source and destination are required" }
}
// The export layout places the workspace tree under <source>/workspace.
const workspaceSource = path.join(source, "workspace")
if (!existsSync(workspaceSource)) {
reply.code(400)
return { error: "Export workspace folder not found" }
}
await cp(workspaceSource, destination, { recursive: true, force: true })
const workspace = await deps.workspaceManager.create(destination)
// Restore per-instance data if it was part of the export.
const instanceDataPath = path.join(source, "instance-data.json")
if (existsSync(instanceDataPath)) {
const raw = await readFile(instanceDataPath, "utf-8")
await deps.instanceStore.write(workspace.path, JSON.parse(raw))
}
// Restore the workspace-scoped opencode config directory when present.
const configSource = path.join(source, "opencode-config")
if (existsSync(configSource)) {
const configTarget = getWorkspaceOpencodeConfigDir(workspace.id)
await cp(configSource, configTarget, { recursive: true, force: true })
}
// Optionally overwrite the user-level config with the exported snapshot.
if (payload.includeConfig) {
const userConfigPath = path.join(source, "user-config.json")
if (existsSync(userConfigPath)) {
const raw = await readFile(userConfigPath, "utf-8")
deps.configStore.replace(JSON.parse(raw))
}
}
return workspace
})
}

View File

@@ -1,16 +1,15 @@
import { FastifyInstance } from "fastify"
import { ZAIClient, type ZAIConfig, type ZAIChatRequest } from "../../integrations/zai-api"
import { ZAIClient, ZAI_MODELS, type ZAIConfig, type ZAIChatRequest, ZAIChatRequestSchema } from "../../integrations/zai-api"
import { Logger } from "../../logger"
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs"
import { join } from "path"
import { homedir } from "os"
import { getUserIntegrationsDir } from "../../user-data"
interface ZAIRouteDeps {
logger: Logger
}
// Config file path
const CONFIG_DIR = join(homedir(), ".nomadarch")
const CONFIG_DIR = getUserIntegrationsDir()
const CONFIG_FILE = join(CONFIG_DIR, "zai-config.json")
export async function registerZAIRoutes(
@@ -69,15 +68,7 @@ export async function registerZAIRoutes(
// List available models
app.get('/api/zai/models', async (request, reply) => {
try {
const config = getZAIConfig()
if (!config.enabled) {
return reply.status(400).send({ error: "Z.AI is not enabled" })
}
const client = new ZAIClient(config)
const models = await client.listModels()
return { models: models.map(name => ({ name, provider: "zai" })) }
return { models: ZAI_MODELS.map(name => ({ name, provider: "zai" })) }
} catch (error) {
logger.error({ error }, "Failed to list Z.AI models")
return reply.status(500).send({ error: "Failed to list models" })
@@ -107,8 +98,9 @@ export async function registerZAIRoutes(
for await (const chunk of client.chatStream(chatRequest)) {
reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
// Check for message_stop event
if (chunk.type === "message_stop") {
// Check for finish_reason to end stream
const finishReason = chunk.choices[0]?.finish_reason
if (finishReason) {
reply.raw.write('data: [DONE]\n\n')
break
}
@@ -133,16 +125,15 @@ export async function registerZAIRoutes(
logger.info("Z.AI routes registered")
}
// Configuration management functions using file-based storage
function getZAIConfig(): ZAIConfig {
try {
if (existsSync(CONFIG_FILE)) {
const data = readFileSync(CONFIG_FILE, 'utf-8')
return JSON.parse(data)
}
return { enabled: false, endpoint: "https://api.z.ai/api/anthropic", timeout: 3000000 }
return { enabled: false, endpoint: "https://api.z.ai/api/paas/v4", timeout: 300000 }
} catch {
return { enabled: false, endpoint: "https://api.z.ai/api/anthropic", timeout: 3000000 }
return { enabled: false, endpoint: "https://api.z.ai/api/paas/v4", timeout: 300000 }
}
}

View File

@@ -1,8 +1,8 @@
import fs from "fs"
import { promises as fsp } from "fs"
import os from "os"
import path from "path"
import type { InstanceData } from "../api-types"
import { getUserInstancesDir } from "../user-data"
const DEFAULT_INSTANCE_DATA: InstanceData = {
messageHistory: [],
@@ -13,7 +13,7 @@ const DEFAULT_INSTANCE_DATA: InstanceData = {
export class InstanceStore {
private readonly instancesDir: string
constructor(baseDir = path.join(os.homedir(), ".config", "codenomad", "instances")) {
constructor(baseDir = getUserInstancesDir()) {
this.instancesDir = baseDir
fs.mkdirSync(this.instancesDir, { recursive: true })
}

View File

@@ -0,0 +1,28 @@
import os from "os"
import path from "path"
// Fallback location for all persisted user data: ~/.config/codenomad
const DEFAULT_ROOT = path.join(os.homedir(), ".config", "codenomad")

/**
 * Root directory for persisted user data.
 *
 * Honors the CODENOMAD_USER_DIR environment variable when it is set to a
 * non-blank value (resolved to an absolute path); otherwise falls back to
 * ~/.config/codenomad.
 */
export function getUserDataRoot(): string {
  const configured = process.env.CODENOMAD_USER_DIR ?? ""
  if (configured.trim().length === 0) {
    return DEFAULT_ROOT
  }
  return path.resolve(configured)
}
/** Absolute path of the top-level user configuration file (config.json). */
export function getUserConfigPath(): string {
  const root = getUserDataRoot()
  return path.join(root, "config.json")
}
/** Directory where per-instance data files are stored. */
export function getUserInstancesDir(): string {
  const root = getUserDataRoot()
  return path.join(root, "instances")
}
/** Directory where third-party integration configs (e.g. Z.AI) are stored. */
export function getUserIntegrationsDir(): string {
  const root = getUserDataRoot()
  return path.join(root, "integrations")
}
/** Root directory for per-workspace opencode configuration trees. */
export function getOpencodeWorkspacesRoot(): string {
  const root = getUserDataRoot()
  return path.join(root, "opencode-workspaces")
}

View File

@@ -0,0 +1,35 @@
import net from "net"
// First TCP port probed when callers do not supply a preferred port.
const DEFAULT_START_PORT = 3000
// Upper bound on sequential probes before findAvailablePort gives up (returns 0).
const MAX_PORT_ATTEMPTS = 50
/**
 * Checks whether a TCP port can be bound on the loopback interface.
 *
 * Resolves true only after the probe server has fully closed, so a caller
 * that immediately binds the reported port does not race the OS releasing it
 * (the original resolved before close() completed).
 *
 * NOTE(review): this probes 127.0.0.1 only — whether a port held on a
 * different interface shows up as taken is OS-dependent; confirm if that matters.
 *
 * @param port TCP port number to probe.
 * @returns Promise resolving to true when the port could be bound.
 */
function isPortAvailable(port: number): Promise<boolean> {
  return new Promise((resolve) => {
    const server = net.createServer()
    // Any bind failure (EADDRINUSE, EACCES, ...) means "not usable for us".
    server.once("error", () => {
      resolve(false)
    })
    server.once("listening", () => {
      // Resolve from the close callback so the port is truly free again.
      server.close(() => resolve(true))
    })
    server.listen(port, "127.0.0.1")
  })
}
/**
 * Scans sequentially from startPort for a bindable TCP port.
 *
 * Probes at most MAX_PORT_ATTEMPTS consecutive ports and returns the first
 * that can be bound. Returns 0 when every candidate is taken — callers must
 * treat 0 as "none found", not as a usable port.
 */
export async function findAvailablePort(startPort: number = DEFAULT_START_PORT): Promise<number> {
  const limit = startPort + MAX_PORT_ATTEMPTS
  let candidate = startPort
  while (candidate < limit) {
    const free = await isPortAvailable(candidate)
    if (free) {
      return candidate
    }
    candidate += 1
  }
  return 0
}
export async function getAvailablePort(preferredPort: number = DEFAULT_START_PORT): Promise<number> {
const isAvailable = await isPortAvailable(preferredPort)
if (isAvailable) {
return preferredPort
}
return findAvailablePort(preferredPort + 1)
}

View File

@@ -10,7 +10,7 @@ import { clearWorkspaceSearchCache } from "../filesystem/search-cache"
import { WorkspaceDescriptor, WorkspaceFileResponse, FileSystemEntry } from "../api-types"
import { WorkspaceRuntime, ProcessExitInfo } from "./runtime"
import { Logger } from "../logger"
import { getOpencodeConfigDir } from "../opencode-config"
import { ensureWorkspaceOpencodeConfig } from "../opencode-config"
const STARTUP_STABILITY_DELAY_MS = 1500
@@ -27,11 +27,9 @@ interface WorkspaceRecord extends WorkspaceDescriptor {}
export class WorkspaceManager {
private readonly workspaces = new Map<string, WorkspaceRecord>()
private readonly runtime: WorkspaceRuntime
private readonly opencodeConfigDir: string
constructor(private readonly options: WorkspaceManagerOptions) {
this.runtime = new WorkspaceRuntime(this.options.eventBus, this.options.logger)
this.opencodeConfigDir = getOpencodeConfigDir()
}
list(): WorkspaceDescriptor[] {
@@ -105,9 +103,10 @@ export class WorkspaceManager {
const preferences = this.options.configStore.get().preferences ?? {}
const userEnvironment = preferences.environmentVariables ?? {}
const opencodeConfigDir = ensureWorkspaceOpencodeConfig(id)
const environment = {
...userEnvironment,
OPENCODE_CONFIG_DIR: this.opencodeConfigDir,
OPENCODE_CONFIG_DIR: opencodeConfigDir,
}
try {