Initialize PromptArch: The Prompt Enhancer (Fork of ClavixDev/Clavix)
5  lib/services/adapter-instance.ts  Normal file
@@ -0,0 +1,5 @@
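// Module-level singleton: every importer shares one ModelAdapter instance.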
import ModelAdapter from "./model-adapter";

const adapter = new ModelAdapter();

export default adapter;
194  lib/services/model-adapter.ts  Normal file
@@ -0,0 +1,194 @@
import type { ModelProvider, APIResponse, ChatMessage } from "@/types";
import QwenOAuthService from "./qwen-oauth";
import OllamaCloudService from "./ollama-cloud";
import ZaiPlanService from "./zai-plan";

export interface ModelAdapterConfig {
  qwen?: {
    apiKey?: string;
    endpoint?: string;
  };
  ollama?: {
    apiKey?: string;
    endpoint?: string;
  };
  zai?: {
    apiKey?: string;
    generalEndpoint?: string;
    codingEndpoint?: string;
  };
}

export class ModelAdapter {
  private qwenService: QwenOAuthService;
  private ollamaService: OllamaCloudService;
  private zaiService: ZaiPlanService;
  private preferredProvider: ModelProvider;

  constructor(config: ModelAdapterConfig = {}, preferredProvider: ModelProvider = "qwen") {
    this.qwenService = new QwenOAuthService(config.qwen);
    this.ollamaService = new OllamaCloudService(config.ollama);
    this.zaiService = new ZaiPlanService(config.zai);
    this.preferredProvider = preferredProvider;
  }

  setPreferredProvider(provider: ModelProvider): void {
    this.preferredProvider = provider;
  }

  updateQwenApiKey(apiKey: string): void {
    this.qwenService = new QwenOAuthService({ apiKey });
  }

  setQwenOAuthTokens(accessToken: string, refreshToken?: string, expiresIn?: number): void {
    this.qwenService.setOAuthTokens(accessToken, refreshToken, expiresIn);
  }

  getQwenAuthUrl(): string {
    return this.qwenService.getAuthorizationUrl();
  }

  updateOllamaApiKey(apiKey: string): void {
    this.ollamaService = new OllamaCloudService({ apiKey });
  }

  updateZaiApiKey(apiKey: string): void {
    this.zaiService = new ZaiPlanService({ apiKey });
  }

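  // Try each provider in order and return the first successful response;
  // thrown errors and unsuccessful results fall through to the next provider.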
  private async callWithFallback<T>(
    operation: (service: any) => Promise<APIResponse<T>>,
    providers: ModelProvider[]
  ): Promise<APIResponse<T>> {
    for (const provider of providers) {
      try {
        let service: any;

        switch (provider) {
          case "qwen":
            service = this.qwenService;
            break;
          case "ollama":
            service = this.ollamaService;
            break;
          case "zai":
            service = this.zaiService;
            break;
        }

        const result = await operation(service);
        if (result.success) {
          return result;
        }
      } catch (error) {
        console.error(`Error with ${provider}:`, error);
      }
    }

    return {
      success: false,
      error: "All providers failed",
    };
  }

  async enhancePrompt(prompt: string, provider?: ModelProvider, model?: string): Promise<APIResponse<string>> {
    const providers: ModelProvider[] = provider ? [provider] : [this.preferredProvider, "ollama", "zai"];
    return this.callWithFallback((service) => service.enhancePrompt(prompt, model), providers);
  }

  async generatePRD(idea: string, provider?: ModelProvider, model?: string): Promise<APIResponse<string>> {
    const providers: ModelProvider[] = provider ? [provider] : ["ollama", "zai", this.preferredProvider];
    return this.callWithFallback((service) => service.generatePRD(idea, model), providers);
  }

  async generateActionPlan(prd: string, provider?: ModelProvider, model?: string): Promise<APIResponse<string>> {
    const providers: ModelProvider[] = provider ? [provider] : ["zai", "ollama", this.preferredProvider];
    return this.callWithFallback((service) => service.generateActionPlan(prd, model), providers);
  }

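  // Unlike the generators above, chatCompletion targets a single provider with no fallback.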
  async chatCompletion(
    messages: ChatMessage[],
    model: string,
    provider: ModelProvider = this.preferredProvider
  ): Promise<APIResponse<string>> {
    try {
      let service: any;

      switch (provider) {
        case "qwen":
          service = this.qwenService;
          break;
        case "ollama":
          service = this.ollamaService;
          break;
        case "zai":
          service = this.zaiService;
          break;
      }

      return await service.chatCompletion(messages, model);
    } catch (error) {
      return {
        success: false,
        error: error instanceof Error ? error.message : "Chat completion failed",
      };
    }
  }

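  // Start from static fallbacks, then overwrite each provider's list with live results where available.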
  async listModels(provider?: ModelProvider): Promise<APIResponse<Record<ModelProvider, string[]>>> {
    const fallbackModels: Record<ModelProvider, string[]> = {
      qwen: ["qwen-coder-plus", "qwen-coder-turbo", "qwen-coder-lite"],
      ollama: ["gpt-oss:120b", "llama3.1", "gemma3", "deepseek-r1", "qwen3"],
      zai: ["glm-4.7", "glm-4.5", "glm-4.5-air", "glm-4-flash", "glm-4-flashx"],
    };
    const models: Record<ModelProvider, string[]> = { ...fallbackModels };

    if (provider === "ollama" || !provider) {
      try {
        const ollamaModels = await this.ollamaService.listModels();
        if (ollamaModels.success && ollamaModels.data && ollamaModels.data.length > 0) {
          models.ollama = ollamaModels.data;
        }
      } catch (error) {
        console.error("[ModelAdapter] Failed to load Ollama models, using fallback:", error);
      }
    }
    if (provider === "zai" || !provider) {
      try {
        const zaiModels = await this.zaiService.listModels();
        if (zaiModels.success && zaiModels.data && zaiModels.data.length > 0) {
          models.zai = zaiModels.data;
        }
      } catch (error) {
        console.error("[ModelAdapter] Failed to load Z.AI models, using fallback:", error);
      }
    }
    if (provider === "qwen" || !provider) {
      try {
        const qwenModels = await this.qwenService.listModels();
        if (qwenModels.success && qwenModels.data && qwenModels.data.length > 0) {
          models.qwen = qwenModels.data;
        }
      } catch (error) {
        console.error("[ModelAdapter] Failed to load Qwen models, using fallback:", error);
      }
    }

    return { success: true, data: models };
  }

  getAvailableModels(provider: ModelProvider): string[] {
    switch (provider) {
      case "qwen":
        return this.qwenService.getAvailableModels();
      case "ollama":
        return this.ollamaService.getAvailableModels();
      case "zai":
        return this.zaiService.getAvailableModels();
      default:
        return [];
    }
  }
}

export default ModelAdapter;
203  lib/services/ollama-cloud.ts  Normal file
@@ -0,0 +1,203 @@
import type { ChatMessage, APIResponse } from "@/types";

export interface OllamaCloudConfig {
  apiKey?: string;
  endpoint?: string;
}

export interface OllamaModel {
  name: string;
  size?: number;
  digest?: string;
}

export class OllamaCloudService {
  private config: OllamaCloudConfig;
  private availableModels: string[] = [];

  constructor(config: OllamaCloudConfig = {}) {
    this.config = {
      endpoint: config.endpoint || "https://ollama.com/api",
      apiKey: config.apiKey || process.env.OLLAMA_API_KEY,
    };
  }

  private getHeaders(): Record<string, string> {
    const headers: Record<string, string> = {
      "Content-Type": "application/json",
    };

    if (this.config.apiKey) {
      headers["Authorization"] = `Bearer ${this.config.apiKey}`;
    }

    return headers;
  }

  async chatCompletion(
    messages: ChatMessage[],
    model: string = "gpt-oss:120b",
    stream: boolean = false
  ): Promise<APIResponse<string>> {
    try {
      if (!this.config.apiKey) {
        throw new Error("API key is required. Please configure your Ollama API key in settings.");
      }

      console.log("[Ollama] API call:", { endpoint: this.config.endpoint, model, messages });

      const response = await fetch(`${this.config.endpoint}/chat`, {
        method: "POST",
        headers: this.getHeaders(),
        body: JSON.stringify({
          model,
          messages,
          stream,
        }),
      });

      console.log("[Ollama] Response status:", response.status, response.statusText);

      if (!response.ok) {
        const errorText = await response.text();
        console.error("[Ollama] Error response:", errorText);
        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
      }

      const data = await response.json();
      console.log("[Ollama] Response data:", data);

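      // Ollama's native /api/chat returns { message: { content } };
      // OpenAI-style gateways return { choices: [{ message }] }, so handle both shapes.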
      if (data.message && data.message.content) {
        return { success: true, data: data.message.content };
      } else if (data.choices && data.choices[0]) {
        return { success: true, data: data.choices[0].message.content };
      } else {
        return { success: false, error: "Unexpected response format" };
      }
    } catch (error) {
      console.error("[Ollama] Chat completion error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Chat completion failed",
      };
    }
  }

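  // With an API key, query /tags for the account's models; otherwise return a static fallback list.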
  async listModels(): Promise<APIResponse<string[]>> {
    try {
      if (this.config.apiKey) {
        console.log("[Ollama] Listing models from:", `${this.config.endpoint}/tags`);

        const response = await fetch(`${this.config.endpoint}/tags`, {
          headers: this.getHeaders(),
        });

        console.log("[Ollama] List models response status:", response.status, response.statusText);

        if (!response.ok) {
          throw new Error(`Failed to list models: ${response.statusText}`);
        }

        const data = await response.json();
        console.log("[Ollama] Models data:", data);
        const models = data.models?.map((m: OllamaModel) => m.name) || [];

        this.availableModels = models;

        return { success: true, data: models };
      } else {
        console.log("[Ollama] No API key, using fallback models");
        return { success: true, data: ["gpt-oss:120b", "llama3.1", "gemma3", "deepseek-r1", "qwen3"] };
      }
    } catch (error) {
      console.error("[Ollama] listModels error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Failed to list models",
      };
    }
  }

  getAvailableModels(): string[] {
    return this.availableModels.length > 0
      ? this.availableModels
      : ["gpt-oss:120b", "llama3.1", "gemma3", "deepseek-r1", "qwen3"];
  }

  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.

Apply these principles:
1. Add specific context about the project and requirements
2. Clarify constraints and preferences
3. Define the expected output format clearly
4. Include edge cases and error handling requirements
5. Specify testing and validation criteria

Return ONLY the enhanced prompt, no explanations.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
  }

  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on the user's idea.

Structure your PRD with these sections:
1. Overview & Objectives
2. User Personas & Use Cases
3. Functional Requirements (prioritized)
4. Non-functional Requirements
5. Technical Architecture Recommendations
6. Success Metrics & KPIs

Use clear, specific language suitable for development teams.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate a PRD for this idea:\n\n${idea}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
  }

  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on the PRD.

Structure the action plan with:
1. Task breakdown with priorities (High/Medium/Low)
2. Dependencies between tasks
3. Estimated effort for each task
4. Recommended frameworks and technologies
5. Architecture guidelines and best practices

Include specific recommendations for:
- Frontend frameworks
- Backend architecture
- Database choices
- Authentication/authorization
- Deployment strategy`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate an action plan based on this PRD:\n\n${prd}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
  }
}

export default OllamaCloudService;
217  lib/services/qwen-oauth.ts  Normal file
@@ -0,0 +1,217 @@
import type { ChatMessage, APIResponse } from "@/types";

export interface QwenOAuthConfig {
  apiKey?: string;
  accessToken?: string;
  refreshToken?: string;
  expiresAt?: number;
  endpoint?: string;
  clientId?: string;
  redirectUri?: string;
}

export class QwenOAuthService {
  private config: QwenOAuthConfig;

  constructor(config: QwenOAuthConfig = {}) {
    this.config = {
      endpoint: config.endpoint || "https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
      apiKey: config.apiKey || process.env.QWEN_API_KEY,
      accessToken: config.accessToken,
      refreshToken: config.refreshToken,
      expiresAt: config.expiresAt,
      clientId: config.clientId || process.env.NEXT_PUBLIC_QWEN_CLIENT_ID,
      redirectUri: config.redirectUri || (typeof window !== "undefined" ? window.location.origin : ""),
    };
  }

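  // Prefer the OAuth access token when present; otherwise fall back to the static API key.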
  private getHeaders(): Record<string, string> {
    const authHeader = this.config.accessToken
      ? `Bearer ${this.config.accessToken}`
      : `Bearer ${this.config.apiKey}`;

    return {
      "Content-Type": "application/json",
      "Authorization": authHeader,
    };
  }

  isAuthenticated(): boolean {
    return !!(this.config.apiKey || (this.config.accessToken && (!this.config.expiresAt || this.config.expiresAt > Date.now())));
  }

  getAccessToken(): string | null {
    return this.config.accessToken || this.config.apiKey || null;
  }

  async authenticate(apiKey: string): Promise<APIResponse<string>> {
    try {
      this.config.apiKey = apiKey;
      this.config.accessToken = undefined; // Clear the OAuth token if an API key is provided
      return { success: true, data: "Authenticated successfully" };
    } catch (error) {
      console.error("Qwen authentication error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Authentication failed",
      };
    }
  }

  setOAuthTokens(accessToken: string, refreshToken?: string, expiresIn?: number): void {
    this.config.accessToken = accessToken;
    if (refreshToken) this.config.refreshToken = refreshToken;
    if (expiresIn) this.config.expiresAt = Date.now() + expiresIn * 1000;
  }

  getAuthorizationUrl(): string {
    const baseUrl = "https://dashscope.console.aliyun.com/oauth/authorize"; // Placeholder URL
    const params = new URLSearchParams({
      client_id: this.config.clientId || "",
      redirect_uri: this.config.redirectUri || "",
      response_type: "code",
      scope: "dashscope:chat",
    });
    return `${baseUrl}?${params.toString()}`;
  }

  async logout(): Promise<void> {
    this.config.apiKey = undefined;
    this.config.accessToken = undefined;
    this.config.refreshToken = undefined;
    this.config.expiresAt = undefined;
  }

  async chatCompletion(
    messages: ChatMessage[],
    model: string = "qwen-coder-plus",
    stream: boolean = false
  ): Promise<APIResponse<string>> {
    try {
      if (!this.config.apiKey && !this.config.accessToken) {
        throw new Error("An API key or OAuth token is required. Please configure your Qwen credentials in settings.");
      }

      console.log("[Qwen] API call:", { endpoint: this.config.endpoint, model, messages });

      const response = await fetch(`${this.config.endpoint}/chat/completions`, {
        method: "POST",
        headers: this.getHeaders(),
        body: JSON.stringify({
          model,
          messages,
          stream,
        }),
      });

      console.log("[Qwen] Response status:", response.status, response.statusText);

      if (!response.ok) {
        const errorText = await response.text();
        console.error("[Qwen] Error response:", errorText);
        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
      }

      const data = await response.json();
      console.log("[Qwen] Response data:", data);

      if (data.choices && data.choices[0] && data.choices[0].message) {
        return { success: true, data: data.choices[0].message.content };
      } else {
        return { success: false, error: "Unexpected response format" };
      }
    } catch (error) {
      console.error("[Qwen] Chat completion error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Chat completion failed",
      };
    }
  }

  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.

Apply these principles:
1. Add specific context about the project and requirements
2. Clarify constraints and preferences
3. Define the expected output format clearly
4. Include edge cases and error handling requirements
5. Specify testing and validation criteria

Return ONLY the enhanced prompt, no explanations or extra text.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "qwen-coder-plus");
  }

  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on the user's idea.

Structure your PRD with these sections:
1. Overview & Objectives
2. User Personas & Use Cases
3. Functional Requirements (prioritized)
4. Non-functional Requirements
5. Technical Architecture Recommendations
6. Success Metrics & KPIs

Use clear, specific language suitable for development teams.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate a PRD for this idea:\n\n${idea}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "qwen-coder-plus");
  }

  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on the PRD.

Structure the action plan with:
1. Task breakdown with priorities (High/Medium/Low)
2. Dependencies between tasks
3. Estimated effort for each task
4. Recommended frameworks and technologies
5. Architecture guidelines and best practices

Include specific recommendations for:
- Frontend frameworks
- Backend architecture
- Database choices
- Authentication/authorization
- Deployment strategy`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate an action plan based on this PRD:\n\n${prd}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "qwen-coder-plus");
  }

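  // Static list; this service does not query a remote model-listing endpoint.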
  async listModels(): Promise<APIResponse<string[]>> {
    const models = ["qwen-coder-plus", "qwen-coder-turbo", "qwen-coder-lite", "qwen-plus", "qwen-turbo", "qwen-max"];
    return { success: true, data: models };
  }

  getAvailableModels(): string[] {
    return ["qwen-coder-plus", "qwen-coder-turbo", "qwen-coder-lite", "qwen-plus", "qwen-turbo", "qwen-max"];
  }
}

export default QwenOAuthService;
187  lib/services/zai-plan.ts  Normal file
@@ -0,0 +1,187 @@
import type { ChatMessage, APIResponse } from "@/types";

export interface ZaiPlanConfig {
  apiKey?: string;
  generalEndpoint?: string;
  codingEndpoint?: string;
}

export class ZaiPlanService {
  private config: ZaiPlanConfig;

  constructor(config: ZaiPlanConfig = {}) {
    this.config = {
      generalEndpoint: config.generalEndpoint || "https://api.z.ai/api/paas/v4",
      codingEndpoint: config.codingEndpoint || "https://api.z.ai/api/coding/paas/v4",
      apiKey: config.apiKey || process.env.ZAI_API_KEY,
    };
  }

  private getHeaders(): Record<string, string> {
    return {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.config.apiKey}`,
      "Accept-Language": "en-US,en",
    };
  }

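  // useCodingEndpoint routes the request to the coding endpoint; enhancePrompt and
  // generateActionPlan opt in, while generatePRD uses the general endpoint.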
  async chatCompletion(
    messages: ChatMessage[],
    model: string = "glm-4.7",
    useCodingEndpoint: boolean = false
  ): Promise<APIResponse<string>> {
    try {
      if (!this.config.apiKey) {
        throw new Error("API key is required. Please configure your Z.AI API key in settings.");
      }

      const endpoint = useCodingEndpoint ? this.config.codingEndpoint : this.config.generalEndpoint;

      console.log("[Z.AI] API call:", { endpoint, model, messages });

      const response = await fetch(`${endpoint}/chat/completions`, {
        method: "POST",
        headers: this.getHeaders(),
        body: JSON.stringify({
          model,
          messages,
          stream: false,
        }),
      });

      console.log("[Z.AI] Response status:", response.status, response.statusText);

      if (!response.ok) {
        const errorText = await response.text();
        console.error("[Z.AI] Error response:", errorText);
        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
      }

      const data = await response.json();
      console.log("[Z.AI] Response data:", data);

      if (data.choices && data.choices[0] && data.choices[0].message) {
        return { success: true, data: data.choices[0].message.content };
      } else if (data.output && data.output.choices && data.output.choices[0]) {
        return { success: true, data: data.output.choices[0].message.content };
      } else {
        return { success: false, error: "Unexpected response format" };
      }
    } catch (error) {
      console.error("[Z.AI] Chat completion error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Chat completion failed",
      };
    }
  }

  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.

Apply these principles:
1. Add specific context about the project and requirements
2. Clarify constraints and preferences
3. Define the expected output format clearly
4. Include edge cases and error handling requirements
5. Specify testing and validation criteria

Return ONLY the enhanced prompt, no explanations or extra text.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7", true);
  }

  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on the user's idea.

Structure your PRD with these sections:
1. Overview & Objectives
2. User Personas & Use Cases
3. Functional Requirements (prioritized by importance)
4. Non-functional Requirements
5. Technical Architecture Recommendations
6. Success Metrics & KPIs

Use clear, specific language suitable for development teams.`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate a PRD for this idea:\n\n${idea}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7");
  }

  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
    const systemMessage: ChatMessage = {
      role: "system",
      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on the PRD.

Structure the action plan with:
1. Task breakdown with priorities (High/Medium/Low)
2. Dependencies between tasks
3. Estimated effort for each task
4. Recommended frameworks and technologies
5. Architecture guidelines and best practices

Include specific recommendations for:
- Frontend frameworks
- Backend architecture
- Database choices
- Authentication/authorization
- Deployment strategy`,
    };

    const userMessage: ChatMessage = {
      role: "user",
      content: `Generate an action plan based on this PRD:\n\n${prd}`,
    };

    return this.chatCompletion([systemMessage, userMessage], model || "glm-4.7", true);
  }

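  // With an API key, query /models on the general endpoint; otherwise return a static fallback list.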
  async listModels(): Promise<APIResponse<string[]>> {
    try {
      if (this.config.apiKey) {
        const response = await fetch(`${this.config.generalEndpoint}/models`, {
          headers: this.getHeaders(),
        });

        if (!response.ok) {
          throw new Error(`Failed to list models: ${response.statusText}`);
        }

        const data = await response.json();
        const models = data.data?.map((m: any) => m.id) || [];

        return { success: true, data: models };
      } else {
        console.log("[Z.AI] No API key, using fallback models");
        return { success: true, data: ["glm-4.7", "glm-4.6", "glm-4.5", "glm-4.5-air", "glm-4-flash", "glm-4-flashx"] };
      }
    } catch (error) {
      console.error("[Z.AI] listModels error:", error);
      return {
        success: false,
        error: error instanceof Error ? error.message : "Failed to list models",
      };
    }
  }

  getAvailableModels(): string[] {
    return ["glm-4.7", "glm-4.6", "glm-4.5", "glm-4.5-air", "glm-4-flash", "glm-4-flashx"];
  }
}

export default ZaiPlanService;
109  lib/store.ts  Normal file
@@ -0,0 +1,109 @@
import { create } from "zustand";
import type { ModelProvider, PromptEnhancement, PRD, ActionPlan } from "@/types";

interface AppState {
  currentPrompt: string;
  enhancedPrompt: string | null;
  prd: PRD | null;
  actionPlan: ActionPlan | null;
  selectedProvider: ModelProvider;
  selectedModels: Record<ModelProvider, string>;
  availableModels: Record<ModelProvider, string[]>;
  apiKeys: Record<ModelProvider, string>;
  qwenTokens?: {
    accessToken: string;
    refreshToken?: string;
    expiresAt?: number;
  };
  isProcessing: boolean;
  error: string | null;
  history: {
    id: string;
    prompt: string;
    timestamp: Date;
  }[];

  setCurrentPrompt: (prompt: string) => void;
  setEnhancedPrompt: (enhanced: string | null) => void;
  setPRD: (prd: PRD) => void;
  setActionPlan: (plan: ActionPlan) => void;
  setSelectedProvider: (provider: ModelProvider) => void;
  setSelectedModel: (provider: ModelProvider, model: string) => void;
  setAvailableModels: (provider: ModelProvider, models: string[]) => void;
  setApiKey: (provider: ModelProvider, key: string) => void;
  setQwenTokens: (tokens: { accessToken: string; refreshToken?: string; expiresAt?: number }) => void;
  setProcessing: (processing: boolean) => void;
  setError: (error: string | null) => void;
  addToHistory: (prompt: string) => void;
  clearHistory: () => void;
  reset: () => void;
}

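// Single Zustand store for prompt state, provider/model selection, credentials, and history.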
const useStore = create<AppState>((set) => ({
  currentPrompt: "",
  enhancedPrompt: null,
  prd: null,
  actionPlan: null,
  selectedProvider: "qwen",
  selectedModels: {
    qwen: "qwen-coder-plus",
    ollama: "gpt-oss:120b",
    zai: "glm-4.7",
  },
  availableModels: {
    qwen: ["qwen-coder-plus", "qwen-coder-turbo", "qwen-coder-lite"],
    ollama: ["gpt-oss:120b", "llama3.1", "gemma3", "deepseek-r1", "qwen3"],
    zai: ["glm-4.7", "glm-4.6", "glm-4.5", "glm-4.5-air", "glm-4-flash", "glm-4-flashx"],
  },
  apiKeys: {
    qwen: "",
    ollama: "",
    zai: "",
  },
  isProcessing: false,
  error: null,
  history: [],

  setCurrentPrompt: (prompt) => set({ currentPrompt: prompt }),
  setEnhancedPrompt: (enhanced) => set({ enhancedPrompt: enhanced }),
  setPRD: (prd) => set({ prd }),
  setActionPlan: (plan) => set({ actionPlan: plan }),
  setSelectedProvider: (provider) => set({ selectedProvider: provider }),
  setSelectedModel: (provider, model) =>
    set((state) => ({
      selectedModels: { ...state.selectedModels, [provider]: model },
    })),
  setAvailableModels: (provider, models) =>
    set((state) => ({
      availableModels: { ...state.availableModels, [provider]: models },
    })),
  setApiKey: (provider, key) =>
    set((state) => ({
      apiKeys: { ...state.apiKeys, [provider]: key },
    })),
  setQwenTokens: (tokens) => set({ qwenTokens: tokens }),
  setProcessing: (processing) => set({ isProcessing: processing }),
  setError: (error) => set({ error }),
  addToHistory: (prompt) =>
    set((state) => ({
      history: [
        ...state.history,
        {
          id: Math.random().toString(36).slice(2, 11),
          prompt,
          timestamp: new Date(),
        },
      ],
    })),
  clearHistory: () => set({ history: [] }),
  reset: () =>
    set({
      currentPrompt: "",
      enhancedPrompt: null,
      prd: null,
      actionPlan: null,
      error: null,
    }),
}));

export default useStore;
6  lib/utils.ts  Normal file
@@ -0,0 +1,6 @@
import { clsx, type ClassValue } from "clsx";
import { twMerge } from "tailwind-merge";

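// Compose conditional class names, letting tailwind-merge resolve conflicting Tailwind utilities.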
export function cn(...inputs: ClassValue[]) {
  return twMerge(clsx(inputs));
}