diff --git a/.env.example b/.env.example
index c44386e..e3d7904 100644
--- a/.env.example
+++ b/.env.example
@@ -14,3 +14,7 @@ OLLAMA_ENDPOINT=https://ollama.com/api
 ZAI_API_KEY=
 ZAI_GENERAL_ENDPOINT=https://api.z.ai/api/paas/v4
 ZAI_CODING_ENDPOINT=https://api.z.ai/api/coding/paas/v4
+
+# Site Configuration (Required for OAuth in production)
+# Set to your production URL (e.g., https://your-app.vercel.app)
+NEXT_PUBLIC_SITE_URL=http://localhost:6002
diff --git a/app/api/ollama/chat/route.ts b/app/api/ollama/chat/route.ts
new file mode 100644
index 0000000..6b0841c
--- /dev/null
+++ b/app/api/ollama/chat/route.ts
@@ -0,0 +1,57 @@
+import { NextRequest, NextResponse } from "next/server";
+import { normalizeOllamaBase, DEFAULT_OLLAMA_BASE } from "../constants";
+
+const API_PREFIX = "/api";
+
+function getApiKey(request: NextRequest): string | null {
+  return request.headers.get("x-ollama-api-key");
+}
+
+function getBaseUrl(request: NextRequest): string {
+  const header = request.headers.get("x-ollama-endpoint");
+  if (header && header.trim().length > 0) {
+    return normalizeOllamaBase(header);
+  }
+  return DEFAULT_OLLAMA_BASE;
+}
+
+export async function POST(request: NextRequest) {
+  const apiKey = getApiKey(request);
+  if (!apiKey) {
+    return NextResponse.json(
+      { error: "Ollama API key is required" },
+      { status: 401 }
+    );
+  }
+
+  const body = await request.json();
+  const baseUrl = getBaseUrl(request);
+  const targetUrl = `${baseUrl}${API_PREFIX}/chat`;
+
+  try {
+    const response = await fetch(targetUrl, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`,
+      },
+      body: JSON.stringify(body),
+    });
+
+    const payload = await response.text();
+    if (!response.ok) {
+      return NextResponse.json(
+        { error: "Ollama chat request failed", details: payload },
+        { status: response.status }
+      );
+    }
+
+    return NextResponse.json(payload ? JSON.parse(payload) : {});
+  } catch (error) {
+    console.error("Ollama chat proxy failed", error);
+    return NextResponse.json(
+      { error: "Ollama chat request failed" },
+      { status: 500 }
+    );
+  }
+}
diff --git a/app/api/ollama/constants.ts b/app/api/ollama/constants.ts
new file mode 100644
index 0000000..fe40777
--- /dev/null
+++ b/app/api/ollama/constants.ts
@@ -0,0 +1,7 @@
+export const DEFAULT_OLLAMA_BASE = process.env.NEXT_PUBLIC_OLLAMA_ENDPOINT || process.env.OLLAMA_ENDPOINT || "https://ollama.com";
+export function normalizeOllamaBase(url?: string): string {
+  if (!url) return DEFAULT_OLLAMA_BASE.replace(/\/$/, "");
+  const trimmed = url.trim();
+  if (!trimmed) return DEFAULT_OLLAMA_BASE.replace(/\/$/, "");
+  return trimmed.replace(/\/$/, "");
+}
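For context, a browser-side caller now hits the local proxy instead of ollama.com directly. A minimal sketch of the intended call shape (the API key and model name are placeholders; the header names are the ones the route reads):

```ts
// Sketch: calling the chat proxy from the browser. The key and model below
// are placeholders; "x-ollama-api-key" / "x-ollama-endpoint" are the headers
// the route above consumes.
const res = await fetch("/api/ollama/chat", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "x-ollama-api-key": "<your-ollama-api-key>",
    // Optional per-request override of the upstream base URL.
    "x-ollama-endpoint": "https://ollama.com",
  },
  body: JSON.stringify({
    model: "gpt-oss:120b",
    messages: [{ role: "user", content: "Hello" }],
    stream: false,
  }),
});
const data = await res.json(); // proxied Ollama payload, e.g. { message: { content } }
```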
json + : []; + + const names: string[] = []; + for (const entry of candidates) { + if (!entry) continue; + const name = entry.name || entry.model || entry.id; + if (typeof name === "string" && name.length > 0) { + names.push(name); + } + } + + return names; +} + +export async function GET(request: NextRequest) { + const apiKey = getApiKey(request); + if (!apiKey) { + return NextResponse.json( + { error: "Ollama API key is required" }, + { status: 401 } + ); + } + + const baseUrl = getBaseUrl(request); + const primaryUrl = `${baseUrl}${API_PREFIX}/v1/models`; + const fallbackUrl = `${baseUrl}${API_PREFIX}/tags`; + + try { + const primaryModels = await fetchModelNames(primaryUrl, apiKey); + if (primaryModels.length > 0) { + return NextResponse.json({ models: primaryModels }); + } + } catch (error) { + console.warn("[Ollama] Primary model fetch failed:", error); + } + + try { + const fallbackModels = await fetchModelNames(fallbackUrl, apiKey); + if (fallbackModels.length > 0) { + return NextResponse.json({ models: fallbackModels }); + } + } catch (error) { + console.warn("[Ollama] Fallback model fetch failed:", error); + } + + return NextResponse.json( + { models: [] }, + { status: 502 } + ); +} diff --git a/lib/services/ollama-cloud.ts b/lib/services/ollama-cloud.ts index 9881f99..fee6a2e 100644 --- a/lib/services/ollama-cloud.ts +++ b/lib/services/ollama-cloud.ts @@ -5,7 +5,8 @@ export interface OllamaCloudConfig { endpoint?: string; } -const DEFAULT_OLLAMA_ENDPOINT = "https://ollama.com"; +const LOCAL_MODELS_URL = "/api/ollama/models"; +const LOCAL_CHAT_URL = "/api/ollama/chat"; const DEFAULT_MODELS = [ "gpt-oss:120b", "llama3.1:latest", @@ -47,96 +48,35 @@ export class OllamaCloudService { constructor(config: OllamaCloudConfig = {}) { this.config = { - endpoint: config.endpoint || DEFAULT_OLLAMA_ENDPOINT, apiKey: config.apiKey || process.env.OLLAMA_API_KEY, + endpoint: config.endpoint, }; } - private getBaseUrl(): string { - const endpoint = this.config.endpoint || DEFAULT_OLLAMA_ENDPOINT; - return endpoint.replace(/\/$/, ""); - } - - private ensureApiPath(path: string): string { - if (path.startsWith("/api")) { - return path; - } - const normalized = path.startsWith("/") ? path : `/${path}`; - return `/api${normalized}`; - } - - private getHeaders(extra: Record = {}): Record { - const headers: Record = { - "Content-Type": "application/json", - ...extra, - }; - + private ensureApiKey(): string { if (this.config.apiKey) { - headers["Authorization"] = `Bearer ${this.config.apiKey}`; + return this.config.apiKey; + } + throw new Error("API key is required. Please configure your Ollama API key in settings."); + } + + private getHeaders(additional: Record = {}) { + const headers: Record = { + ...additional, + "x-ollama-api-key": this.ensureApiKey(), + }; + + if (this.config.endpoint) { + headers["x-ollama-endpoint"] = this.config.endpoint; } return headers; } - private async makeRequest( - path: string, - options: RequestInit = {}, - useApiPrefix: boolean = true, - timeoutMs: number = 120_000 - ): Promise { - const url = - this.getBaseUrl() + - (useApiPrefix ? this.ensureApiPath(path) : (path.startsWith("/") ? 
diff --git a/lib/services/ollama-cloud.ts b/lib/services/ollama-cloud.ts
index 9881f99..fee6a2e 100644
--- a/lib/services/ollama-cloud.ts
+++ b/lib/services/ollama-cloud.ts
@@ -5,7 +5,8 @@ export interface OllamaCloudConfig {
   endpoint?: string;
 }
 
-const DEFAULT_OLLAMA_ENDPOINT = "https://ollama.com";
+const LOCAL_MODELS_URL = "/api/ollama/models";
+const LOCAL_CHAT_URL = "/api/ollama/chat";
 const DEFAULT_MODELS = [
   "gpt-oss:120b",
   "llama3.1:latest",
@@ -47,96 +48,35 @@
   constructor(config: OllamaCloudConfig = {}) {
     this.config = {
-      endpoint: config.endpoint || DEFAULT_OLLAMA_ENDPOINT,
       apiKey: config.apiKey || process.env.OLLAMA_API_KEY,
+      endpoint: config.endpoint,
     };
   }
 
-  private getBaseUrl(): string {
-    const endpoint = this.config.endpoint || DEFAULT_OLLAMA_ENDPOINT;
-    return endpoint.replace(/\/$/, "");
-  }
-
-  private ensureApiPath(path: string): string {
-    if (path.startsWith("/api")) {
-      return path;
-    }
-    const normalized = path.startsWith("/") ? path : `/${path}`;
-    return `/api${normalized}`;
-  }
-
-  private getHeaders(extra: Record<string, string> = {}): Record<string, string> {
-    const headers: Record<string, string> = {
-      "Content-Type": "application/json",
-      ...extra,
-    };
-
+  private ensureApiKey(): string {
     if (this.config.apiKey) {
-      headers["Authorization"] = `Bearer ${this.config.apiKey}`;
+      return this.config.apiKey;
+    }
+    throw new Error("API key is required. Please configure your Ollama API key in settings.");
+  }
+
+  private getHeaders(additional: Record<string, string> = {}) {
+    const headers: Record<string, string> = {
+      ...additional,
+      "x-ollama-api-key": this.ensureApiKey(),
+    };
+
+    if (this.config.endpoint) {
+      headers["x-ollama-endpoint"] = this.config.endpoint;
     }
 
     return headers;
   }
 
-  private async makeRequest(
-    path: string,
-    options: RequestInit = {},
-    useApiPrefix: boolean = true,
-    timeoutMs: number = 120_000
-  ): Promise<Response> {
-    const url =
-      this.getBaseUrl() +
-      (useApiPrefix ? this.ensureApiPath(path) : (path.startsWith("/") ? path : `/${path}`));
-
-    const controller = new AbortController();
-    const timeout = setTimeout(() => controller.abort(), timeoutMs);
-
-    const headers = {
-      ...this.getHeaders(),
-      ...(options.headers || {}),
-    };
-
-    try {
-      return await fetch(url, {
-        ...options,
-        headers,
-        signal: controller.signal,
-      });
-    } finally {
-      clearTimeout(timeout);
-    }
-  }
-
-  private parseModelNamesFromArray(models: any[]): string[] {
-    return models
-      .map((entry) => entry?.name || entry?.model || entry?.id)
-      .filter((name): name is string => typeof name === "string" && name.length > 0);
-  }
-
-  private async fetchModelsFromV1(): Promise<string[]> {
-    const response = await this.makeRequest("/v1/models", { method: "GET" }, false);
-    if (!response.ok) {
-      const errorText = await response.text().catch(() => "Failed to parse response");
-      throw new Error(`Ollama /v1/models request failed: ${response.statusText} - ${errorText}`);
-    }
-
-    const json = await response.json().catch(() => null);
-    const entries = Array.isArray(json?.data) ? json.data : [];
-    const names = this.parseModelNamesFromArray(entries);
-    return names;
-  }
-
-  private async fetchModelsFromTags(): Promise<string[]> {
-    const response = await this.makeRequest("/tags", { method: "GET" }, true);
-    if (!response.ok) {
-      const errorText = await response.text().catch(() => "Failed to parse response");
-      throw new Error(`Ollama /tags request failed: ${response.statusText} - ${errorText}`);
-    }
-
-    const json = await response.json().catch(() => null);
-    const entries = Array.isArray(json?.models) ? json.models : Array.isArray(json) ? json : [];
-    const names = this.parseModelNamesFromArray(entries);
-    return names;
+  private async parseJsonResponse(response: Response): Promise<any> {
+    const text = await response.text();
+    if (!text) return null;
+    return JSON.parse(text);
   }
 
   async chatCompletion(
@@ -145,29 +85,29 @@ export class OllamaCloudService {
     messages: ChatMessage[],
     model: string = "gpt-oss:120b",
     stream: boolean = false
   ): Promise<APIResponse<string>> {
     try {
-      const response = await this.makeRequest(
-        "/chat",
-        {
-          method: "POST",
-          body: JSON.stringify({
-            model,
-            messages,
-            stream,
-          }),
-        },
-        true
-      );
+      const response = await fetch(LOCAL_CHAT_URL, {
+        method: "POST",
+        headers: this.getHeaders({ "Content-Type": "application/json" }),
+        body: JSON.stringify({
+          model,
+          messages,
+          stream,
+        }),
+      });
 
       if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
+        const errorBody = await response.text();
+        throw new Error(
+          `Chat completion failed (${response.status}): ${response.statusText} - ${errorBody}`
+        );
       }
 
-      const data = await response.json();
-
-      if (data.message && data.message.content) {
+      const data = await this.parseJsonResponse(response);
+      if (data?.message?.content) {
         return { success: true, data: data.message.content };
-      } else if (data.choices && data.choices[0]?.message?.content) {
+      }
+
+      if (data?.choices?.[0]?.message?.content) {
         return { success: true, data: data.choices[0].message.content };
       }
@@ -183,28 +123,31 @@
   async listModels(): Promise<APIResponse<string[]>> {
     try {
-      const primary = await this.fetchModelsFromV1();
-      if (primary.length > 0) {
-        this.availableModels = primary;
-        return { success: true, data: primary };
+      const response = await fetch(LOCAL_MODELS_URL, {
+        headers: this.getHeaders(),
+      });
+
+      if (!response.ok) {
+        const errorBody = await response.text();
+        throw new Error(`List models failed: ${response.statusText} - ${errorBody}`);
       }
 
-      const fallback = await this.fetchModelsFromTags();
-      if (fallback.length > 0) {
-        this.availableModels = fallback;
-        return { success: true, data: fallback };
-      }
+      const data = await this.parseJsonResponse(response);
+      const models: string[] = Array.isArray(data?.models) ? data.models : [];
 
-      this.availableModels = DEFAULT_MODELS;
-      return { success: true, data: DEFAULT_MODELS };
-    } catch (error) {
-      console.error("[Ollama] listModels error:", error);
-
-      if (DEFAULT_MODELS.length > 0) {
+      if (models.length === 0) {
         this.availableModels = DEFAULT_MODELS;
         return { success: true, data: DEFAULT_MODELS };
       }
 
+      this.availableModels = models;
+      return { success: true, data: models };
+    } catch (error) {
+      console.error("[Ollama] listModels error:", error);
+      if (DEFAULT_MODELS.length > 0) {
+        this.availableModels = DEFAULT_MODELS;
+        return { success: true, data: DEFAULT_MODELS };
+      }
       return {
         success: false,
         error: error instanceof Error ? error.message : "Failed to list models",
@@ -215,146 +158,6 @@ export class OllamaCloudService {
   getAvailableModels(): string[] {
     return this.availableModels.length > 0 ? this.availableModels : DEFAULT_MODELS;
   }
-
-  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.
-
-Apply these principles:
-1. Add specific context about project and requirements
-2. Clarify constraints and preferences
-3. Define expected output format clearly
-4. Include edge cases and error handling requirements
-5. Specify testing and validation criteria
-
-Return ONLY the enhanced prompt, no explanations.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on user's idea.
-
-Structure your PRD with these sections:
-1. Overview & Objectives
-2. User Personas & Use Cases
-3. Functional Requirements (prioritized)
-4. Non-functional Requirements
-5. Technical Architecture Recommendations
-6. Success Metrics & KPIs
-
-Use clear, specific language suitable for development teams.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Generate a PRD for this idea:\n\n${idea}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on PRD.
-
-Structure of action plan with:
-1. Task breakdown with priorities (High/Medium/Low)
-2. Dependencies between tasks
-3. Estimated effort for each task
-4. Recommended frameworks and technologies
-5. Architecture guidelines and best practices
-
-Include specific recommendations for:
-- Frontend frameworks
-- Backend architecture
-- Database choices
-- Authentication/authorization
-- Deployment strategy`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Generate an action plan based on this PRD:\n\n${prd}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generateUXDesignerPrompt(appDescription: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are a world-class UX/UI designer with deep expertise in human-centered design principles, user research, interaction design, visual design systems, and modern design tools (Figma, Sketch, Adobe XD).
-
-Your task is to create an exceptional, detailed prompt for generating the best possible UX design for a given app description.
-
-Generate a comprehensive UX design prompt that includes:
-
-1. USER RESEARCH & PERSONAS
-   - Primary target users and their motivations
-   - User pain points and needs
-   - User journey maps
-   - Persona archetypes with demographics and goals
-
-2. INFORMATION ARCHITECTURE
-   - Content hierarchy and organization
-   - Navigation structure and patterns
-   - User flows and key pathways
-   - Site map or app structure
-
-3. VISUAL DESIGN SYSTEM
-   - Color palette recommendations (primary, secondary, accent, neutral)
-   - Typography hierarchy and font pairings
-   - Component library approach
-   - Spacing, sizing, and layout grids
-   - Iconography style and set
-
-4. INTERACTION DESIGN
-   - Micro-interactions and animations
-   - Gesture patterns for touch interfaces
-   - Loading states and empty states
-   - Error handling and feedback mechanisms
-   - Accessibility considerations (WCAG compliance)
-
-5. KEY SCREENS & COMPONENTS
-   - Core screens that need detailed design
-   - Critical components (buttons, forms, cards, navigation)
-   - Data visualization needs
-   - Responsive design requirements (mobile, tablet, desktop)
-
-6. DESIGN DELIVERABLES
-   - Wireframes vs. high-fidelity mockups
-   - Design system documentation needs
-   - Prototyping requirements
-   - Handoff specifications for developers
-
-7. COMPETITIVE INSIGHTS
-   - Design patterns from successful apps in this category
-   - Opportunities to differentiate
-   - Modern design trends to consider
-
-The output should be a detailed, actionable prompt that a designer or AI image generator can use to create world-class UX designs.
-
-Make the prompt specific, inspiring, and comprehensive. Use professional UX terminology.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Create the BEST EVER UX design prompt for this app:\n\n${appDescription}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
 }
 
 export default OllamaCloudService;
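After this refactor the service never needs the upstream base URL in the browser; it talks to the local `/api/ollama/*` routes and forwards credentials via headers. Intended usage might look like the following sketch (the API key is a placeholder; the import path assumes the project's `@/` alias):

```ts
import OllamaCloudService from "@/lib/services/ollama-cloud";

// Sketch: the refactored service proxies through /api/ollama/* instead of
// calling ollama.com directly. The key below is a placeholder.
const ollama = new OllamaCloudService({ apiKey: "<your-ollama-api-key>" });

const models = await ollama.listModels();
if (models.success) {
  console.log(models.data); // e.g. ["gpt-oss:120b", "llama3.1:latest", ...]
}

const reply = await ollama.chatCompletion(
  [{ role: "user", content: "Hello" }],
  "gpt-oss:120b"
);
```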
diff --git a/lib/services/qwen-oauth.ts b/lib/services/qwen-oauth.ts
index a33bb80..01d586b 100644
--- a/lib/services/qwen-oauth.ts
+++ b/lib/services/qwen-oauth.ts
@@ -1,9 +1,18 @@
 import type { ChatMessage, APIResponse } from "@/types";
 
 const DEFAULT_QWEN_ENDPOINT = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
-const DEFAULT_OAUTH_BASE = "/api/qwen";
 const TOKEN_STORAGE_KEY = "promptarch-qwen-tokens";
 
+function getOAuthBaseUrl(): string {
+  if (typeof window !== "undefined") {
+    return `${window.location.origin}/api/qwen`;
+  }
+  if (process.env.NEXT_PUBLIC_SITE_URL) {
+    return `${process.env.NEXT_PUBLIC_SITE_URL}/api/qwen`;
+  }
+  return "/api/qwen";
+}
+
 export interface QwenOAuthConfig {
   apiKey?: string;
   endpoint?: string;
@@ -39,7 +48,7 @@ export class QwenOAuthService {
 
   constructor(config: QwenOAuthConfig = {}) {
     this.endpoint = config.endpoint || DEFAULT_QWEN_ENDPOINT;
-    this.oauthBaseUrl = config.oauthBaseUrl || DEFAULT_OAUTH_BASE;
+    this.oauthBaseUrl = config.oauthBaseUrl || getOAuthBaseUrl();
    this.apiKey = config.apiKey || process.env.QWEN_API_KEY || undefined;
 
     if (config.accessToken) {
@@ -104,7 +113,7 @@
   private hydrateTokens() {
-    if (this.storageHydrated || typeof window === "undefined") {
+    if (this.storageHydrated || typeof window === "undefined" || typeof window.localStorage === "undefined") {
       return;
     }
 
@@ -113,7 +122,8 @@
       if (stored) {
         this.token = JSON.parse(stored);
       }
-    } catch {
+    } catch (error) {
+      console.warn("[QwenOAuth] Failed to read tokens from localStorage:", error);
       this.token = null;
     } finally {
       this.storageHydrated = true;
@@ -126,14 +136,18 @@
   }
 
   private persistToken(token: QwenOAuthToken | null) {
-    if (typeof window === "undefined") {
+    if (typeof window === "undefined" || typeof window.localStorage === "undefined") {
       return;
     }
 
-    if (token) {
-      window.localStorage.setItem(TOKEN_STORAGE_KEY, JSON.stringify(token));
-    } else {
-      window.localStorage.removeItem(TOKEN_STORAGE_KEY);
+    try {
+      if (token) {
+        window.localStorage.setItem(TOKEN_STORAGE_KEY, JSON.stringify(token));
+      } else {
+        window.localStorage.removeItem(TOKEN_STORAGE_KEY);
+      }
+    } catch (error) {
+      console.warn("[QwenOAuth] Failed to persist tokens to localStorage:", error);
     }
   }
 
@@ -227,17 +241,23 @@
       throw new Error("Qwen OAuth is only supported in the browser");
     }
 
-    const codeVerifier = this.generateCodeVerifier();
-    const codeChallenge = await this.generateCodeChallenge(codeVerifier);
-    const deviceAuth = await this.requestDeviceAuthorization(codeChallenge);
-
     const popup = window.open(
-      deviceAuth.verification_uri_complete,
+      "",
       "qwen-oauth",
       "width=500,height=600,scrollbars=yes,resizable=yes"
     );
 
-    if (!popup) {
+    const codeVerifier = this.generateCodeVerifier();
+    const codeChallenge = await this.generateCodeChallenge(codeVerifier);
+    const deviceAuth = await this.requestDeviceAuthorization(codeChallenge);
+
+    if (popup) {
+      try {
+        popup.location.href = deviceAuth.verification_uri_complete;
+      } catch {
+        // ignore cross-origin restrictions
+      }
+    } else {
       window.alert(
         `Open this URL to authenticate:\n${deviceAuth.verification_uri_complete}\n\nUser code: ${deviceAuth.user_code}`
       );
diff --git a/vercel.json b/vercel.json
index 9dafd73..2ecc9bf 100644
--- a/vercel.json
+++ b/vercel.json
@@ -2,5 +2,11 @@
   "buildCommand": "npm run build",
   "outputDirectory": ".next",
   "framework": "nextjs",
-  "devCommand": "npm run dev"
+  "devCommand": "npm run dev",
+  "env": {
+    "NEXT_PUBLIC_SITE_URL": {
+      "description": "The production URL of your app (e.g., https://your-app.vercel.app)",
+      "value": "https://your-app.vercel.app"
+    }
+  }
 }
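On the Qwen side, `getOAuthBaseUrl()` resolves to the browser origin when available, then to `NEXT_PUBLIC_SITE_URL` on the server, and finally to a relative path. Callers can still pin the base URL explicitly; a minimal sketch (the URL is a placeholder for your deployment):

```ts
import { QwenOAuthService } from "@/lib/services/qwen-oauth";

// Sketch: overriding the OAuth base URL explicitly, e.g. in tests or other
// non-browser contexts where NEXT_PUBLIC_SITE_URL may not be set.
// The URL below is a placeholder.
const qwen = new QwenOAuthService({
  oauthBaseUrl: "https://your-app.vercel.app/api/qwen",
});
```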