import type { ChatMessage, APIResponse } from "@/types";
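
/**
 * Configuration for ZaiPlanService. All fields are optional; missing values
 * fall back to the default endpoints and the ZAI_API_KEY environment variable.
 */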
export interface ZaiPlanConfig {
  apiKey?: string;
  generalEndpoint?: string;
  codingEndpoint?: string;
}
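
/**
 * Thin client for the Z.AI chat-completion API. Routes requests to either the
 * general or the coding endpoint and exposes helpers for prompt enhancement,
 * PRD generation, and action-plan generation.
 */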
export class ZaiPlanService {
  private config: ZaiPlanConfig;

  constructor(config: ZaiPlanConfig = {}) {
    this.config = {
      generalEndpoint: config.generalEndpoint || "https://api.z.ai/api/paas/v4",
      codingEndpoint: config.codingEndpoint || "https://api.z.ai/api/coding/paas/v4",
      apiKey: config.apiKey || process.env.ZAI_API_KEY,
    };
  }
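
  /** JSON content-type, bearer-auth, and language headers sent with every API request. */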
  private getHeaders(): Record<string, string> {
    return {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.config.apiKey}`,
      "Accept-Language": "en-US,en",
    };
  }
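
  /**
   * Sends a non-streaming chat completion request. Set `useCodingEndpoint`
   * to route the call to the coding endpoint instead of the general one.
   * Returns the first choice's message content, handling both the
   * `choices[...]` and `output.choices[...]` response shapes.
   */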
  async chatCompletion(
    messages: ChatMessage[],
    model: string = "glm-4.7",
    useCodingEndpoint: boolean = false
  ): Promise<APIResponse<string>> {
    try {
      if (!this.config.apiKey) {
        throw new Error("API key is required. Please configure your Z.AI API key in settings.");
      }

      const endpoint = useCodingEndpoint ? this.config.codingEndpoint : this.config.generalEndpoint;

      console.log("[Z.AI] API call:", { endpoint, model, messages });

      const response = await fetch(`${endpoint}/chat/completions`, {
        method: "POST",
        headers: this.getHeaders(),
        body: JSON.stringify({
          model,
          messages,
          stream: false,
        }),
      });

      console.log("[Z.AI] Response status:", response.status, response.statusText);

      if (!response.ok) {
        const errorText = await response.text();
        console.error("[Z.AI] Error response:", errorText);
        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
      }

      const data = await response.json();
      console.log("[Z.AI] Response data:", data);

      if (data.choices && data.choices[0] && data.choices[0].message) {
        return { success: true, data: data.choices[0].message.content };
      } else if (data.output && data.output.choices && data.output.choices[0]) {
        return { success: true, data: data.output.choices[0].message.content };
      } else {
        return { success: false, error: "Unexpected response format" };
      }
    } catch (error) {
      console.error("[Z.AI] Chat completion error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Chat completion failed",
      };
    }
  }
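
  /**
   * Rewrites a user prompt so it is more precise and actionable for an AI
   * coding agent. Uses the coding endpoint.
   */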
  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.

Apply these principles:
1. Add specific context about the project and requirements
2. Clarify constraints and preferences
3. Define the expected output format clearly
4. Include edge cases and error handling requirements
5. Specify testing and validation criteria

Return ONLY the enhanced prompt, no explanations or extra text.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7", true);
  }
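
  /**
   * Generates a comprehensive Product Requirements Document from a product
   * idea, using the general endpoint.
   */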
  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on the user's idea.

Structure your PRD with these sections:
1. Overview & Objectives
2. User Personas & Use Cases
3. Functional Requirements (prioritized by importance)
4. Non-functional Requirements
5. Technical Architecture Recommendations
6. Success Metrics & KPIs

Use clear, specific language suitable for development teams.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate a PRD for this idea:\n\n${idea}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7");
  }
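
  /**
   * Turns a PRD into a prioritized action plan with task breakdown and
   * technology recommendations. Uses the coding endpoint.
   */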
  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on the PRD.

Structure the action plan with:
1. Task breakdown with priorities (High/Medium/Low)
2. Dependencies between tasks
3. Estimated effort for each task
4. Recommended frameworks and technologies
5. Architecture guidelines and best practices

Include specific recommendations for:
- Frontend frameworks
- Backend architecture
- Database choices
- Authentication/authorization
- Deployment strategy`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate an action plan based on this PRD:\n\n${prd}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7", true);
  }
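
  /**
   * Lists model IDs from the /models endpoint when an API key is configured;
   * otherwise returns the built-in fallback list.
   */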
  async listModels(): Promise<APIResponse<string[]>> {
    try {
      if (this.config.apiKey) {
        const response = await fetch(`${this.config.generalEndpoint}/models`, {
          headers: this.getHeaders(),
        });

        if (!response.ok) {
          throw new Error(`Failed to list models: ${response.statusText}`);
        }

        const data = await response.json();
        const models = data.data?.map((m: any) => m.id) || [];

        return { success: true, data: models };
      } else {
        console.log("[Z.AI] No API key, using fallback models");
        return { success: true, data: this.getAvailableModels() };
      }
    } catch (error) {
      console.error("[Z.AI] listModels error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Failed to list models",
      };
    }
  }
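
  /** Built-in list of model IDs used as the default/fallback set. */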
  getAvailableModels(): string[] {
    return ["glm-4.7", "glm-4.6", "glm-4.5", "glm-4.5-air", "glm-4-flash", "glm-4-flashx"];
  }
}

export default ZaiPlanService;
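
// Usage sketch (hypothetical caller; assumes ZAI_API_KEY is set in the environment):
//
//   const zai = new ZaiPlanService();
//   const enhanced = await zai.enhancePrompt("Add dark mode to the settings page");
//   if (enhanced.success) {
//     console.log(enhanced.data);
//   } else {
//     console.error(enhanced.error);
//   }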