Make Qwen OAuth work on Vercel

Gemini AI
2025-12-26 00:37:05 +04:00
parent a1a30e66fe
commit 79204352fe
7 changed files with 255 additions and 270 deletions


@@ -14,3 +14,7 @@ OLLAMA_ENDPOINT=https://ollama.com/api
 ZAI_API_KEY=
 ZAI_GENERAL_ENDPOINT=https://api.z.ai/api/paas/v4
 ZAI_CODING_ENDPOINT=https://api.z.ai/api/coding/paas/v4
+
+# Site Configuration (Required for OAuth in production)
+# Set to your production URL (e.g., https://your-app.vercel.app)
+NEXT_PUBLIC_SITE_URL=http://localhost:6002

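Why both a variable and a localhost default: in the browser the origin can be read from window.location, so NEXT_PUBLIC_SITE_URL mainly matters for code running server-side on Vercel, where there is no window. A minimal sketch of that resolution order (getSiteUrl is a hypothetical helper, not part of this commit; the commit applies the same precedence in getOAuthBaseUrl further down):

// Hypothetical helper illustrating the resolution order.
export function getSiteUrl(): string {
  if (typeof window !== "undefined") {
    return window.location.origin; // browser: correct even on preview deploys
  }
  // server: configured production URL, then the local dev default
  return process.env.NEXT_PUBLIC_SITE_URL ?? "http://localhost:6002";
}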

@@ -0,0 +1,57 @@
import { NextRequest, NextResponse } from "next/server";
import { normalizeOllamaBase, DEFAULT_OLLAMA_BASE } from "../constants";

const API_PREFIX = "/api";

function getApiKey(request: NextRequest): string | null {
  return request.headers.get("x-ollama-api-key");
}

function getBaseUrl(request: NextRequest): string {
  const header = request.headers.get("x-ollama-endpoint");
  if (header && header.trim().length > 0) {
    return normalizeOllamaBase(header);
  }
  return DEFAULT_OLLAMA_BASE;
}

export async function POST(request: NextRequest) {
  const apiKey = getApiKey(request);
  if (!apiKey) {
    return NextResponse.json(
      { error: "Ollama API key is required" },
      { status: 401 }
    );
  }

  const body = await request.json();
  const baseUrl = getBaseUrl(request);
  const targetUrl = `${baseUrl}${API_PREFIX}/chat`;

  try {
    const response = await fetch(targetUrl, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify(body),
    });

    const payload = await response.text();
    if (!response.ok) {
      return NextResponse.json(
        { error: "Ollama chat request failed", details: payload },
        { status: response.status }
      );
    }

    return NextResponse.json(payload ? JSON.parse(payload) : {});
  } catch (error) {
    console.error("Ollama chat proxy failed", error);
    return NextResponse.json(
      { error: "Ollama chat request failed" },
      { status: 500 }
    );
  }
}

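For reference, a browser-side caller would hit this proxy roughly as sketched below; the x-ollama-api-key header is mandatory (the route answers 401 without it) and x-ollama-endpoint is an optional upstream override. The helper and model name are illustrative, not part of the commit.

// Sketch: calling the chat proxy from the client.
async function proxiedChat(apiKey: string) {
  const res = await fetch("/api/ollama/chat", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-ollama-api-key": apiKey,                // forwarded upstream as a Bearer token
      "x-ollama-endpoint": "https://ollama.com", // optional; defaults server-side
    },
    body: JSON.stringify({
      model: "gpt-oss:120b", // illustrative model name
      messages: [{ role: "user", content: "Hello" }],
      stream: false,
    }),
  });
  if (!res.ok) throw new Error(`Proxy error ${res.status}`);
  return res.json();
}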

@@ -0,0 +1,7 @@
export const DEFAULT_OLLAMA_BASE = process.env.NEXT_PUBLIC_OLLAMA_ENDPOINT || process.env.OLLAMA_ENDPOINT || "https://ollama.com";
export function normalizeOllamaBase(url?: string): string {
  if (!url) return DEFAULT_OLLAMA_BASE.replace(/\/$/, "");
  const trimmed = url.trim();
  if (!trimmed) return DEFAULT_OLLAMA_BASE.replace(/\/$/, "");
  return trimmed.replace(/\/$/, "");
}

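A short usage sketch for the helper above (import path assumed; expected results in comments):

import { normalizeOllamaBase } from "./constants";

// Trailing slashes are trimmed so `${base}/api/...` concatenation stays clean:
normalizeOllamaBase("https://ollama.com/"); // -> "https://ollama.com"
// Empty or whitespace-only input falls back to the default base:
normalizeOllamaBase("   ");                 // -> DEFAULT_OLLAMA_BASE without a trailing slash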

@@ -0,0 +1,88 @@
import { NextRequest, NextResponse } from "next/server";
import { normalizeOllamaBase, DEFAULT_OLLAMA_BASE } from "../constants";

const API_PREFIX = "/api";

function getApiKey(request: NextRequest): string | null {
  return request.headers.get("x-ollama-api-key");
}

function getBaseUrl(request: NextRequest): string {
  const header = request.headers.get("x-ollama-endpoint");
  if (header && header.trim().length > 0) {
    return normalizeOllamaBase(header);
  }
  return DEFAULT_OLLAMA_BASE;
}

async function fetchModelNames(url: string, apiKey: string): Promise<string[]> {
  const response = await fetch(url, {
    method: "GET",
    headers: {
      Authorization: `Bearer ${apiKey}`,
      Accept: "application/json",
    },
  });

  if (!response.ok) {
    const errorText = await response.text().catch(() => "Failed to parse");
    throw new Error(`${response.status} ${response.statusText} - ${errorText}`);
  }

  const json = await response.json().catch(() => null);
  const candidates = Array.isArray(json?.models)
    ? json.models
    : Array.isArray(json?.data)
      ? json.data
      : Array.isArray(json)
        ? json
        : [];

  const names: string[] = [];
  for (const entry of candidates) {
    if (!entry) continue;
    const name = entry.name || entry.model || entry.id;
    if (typeof name === "string" && name.length > 0) {
      names.push(name);
    }
  }
  return names;
}

export async function GET(request: NextRequest) {
  const apiKey = getApiKey(request);
  if (!apiKey) {
    return NextResponse.json(
      { error: "Ollama API key is required" },
      { status: 401 }
    );
  }

  const baseUrl = getBaseUrl(request);
  const primaryUrl = `${baseUrl}${API_PREFIX}/v1/models`;
  const fallbackUrl = `${baseUrl}${API_PREFIX}/tags`;

  try {
    const primaryModels = await fetchModelNames(primaryUrl, apiKey);
    if (primaryModels.length > 0) {
      return NextResponse.json({ models: primaryModels });
    }
  } catch (error) {
    console.warn("[Ollama] Primary model fetch failed:", error);
  }

  try {
    const fallbackModels = await fetchModelNames(fallbackUrl, apiKey);
    if (fallbackModels.length > 0) {
      return NextResponse.json({ models: fallbackModels });
    }
  } catch (error) {
    console.warn("[Ollama] Fallback model fetch failed:", error);
  }

  return NextResponse.json({ models: [] }, { status: 502 });
}

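fetchModelNames accepts three payload shapes on purpose: ${base}/api/v1/models is OpenAI-compatible while ${base}/api/tags is Ollama-native, and some proxies return a bare array. Illustrative payloads (not from the commit) that all normalize to the same list:

// Each of these normalizes to ["llama3.1:latest"]:
const nativeTags = { models: [{ name: "llama3.1:latest" }] }; // GET /api/tags
const openAiStyle = { data: [{ id: "llama3.1:latest" }] };    // GET /api/v1/models
const bareArray = [{ model: "llama3.1:latest" }];             // bare-array fallback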

@@ -5,7 +5,8 @@ export interface OllamaCloudConfig {
   endpoint?: string;
 }
 
-const DEFAULT_OLLAMA_ENDPOINT = "https://ollama.com";
+const LOCAL_MODELS_URL = "/api/ollama/models";
+const LOCAL_CHAT_URL = "/api/ollama/chat";
 const DEFAULT_MODELS = [
   "gpt-oss:120b",
   "llama3.1:latest",
@@ -47,96 +48,35 @@ export class OllamaCloudService {
   constructor(config: OllamaCloudConfig = {}) {
     this.config = {
-      endpoint: config.endpoint || DEFAULT_OLLAMA_ENDPOINT,
       apiKey: config.apiKey || process.env.OLLAMA_API_KEY,
+      endpoint: config.endpoint,
     };
   }
 
-  private getBaseUrl(): string {
-    const endpoint = this.config.endpoint || DEFAULT_OLLAMA_ENDPOINT;
-    return endpoint.replace(/\/$/, "");
-  }
-
-  private ensureApiPath(path: string): string {
-    if (path.startsWith("/api")) {
-      return path;
-    }
-    const normalized = path.startsWith("/") ? path : `/${path}`;
-    return `/api${normalized}`;
-  }
-
-  private getHeaders(extra: Record<string, string> = {}): Record<string, string> {
-    const headers: Record<string, string> = {
-      "Content-Type": "application/json",
-      ...extra,
-    };
-    if (this.config.apiKey) {
-      headers["Authorization"] = `Bearer ${this.config.apiKey}`;
-    }
-    return headers;
-  }
-
-  private async makeRequest(
-    path: string,
-    options: RequestInit = {},
-    useApiPrefix: boolean = true,
-    timeoutMs: number = 120_000
-  ): Promise<Response> {
-    const url =
-      this.getBaseUrl() +
-      (useApiPrefix ? this.ensureApiPath(path) : (path.startsWith("/") ? path : `/${path}`));
-    const controller = new AbortController();
-    const timeout = setTimeout(() => controller.abort(), timeoutMs);
-    const headers = {
-      ...this.getHeaders(),
-      ...(options.headers || {}),
-    };
-    try {
-      return await fetch(url, {
-        ...options,
-        headers,
-        signal: controller.signal,
-      });
-    } finally {
-      clearTimeout(timeout);
-    }
-  }
-
-  private parseModelNamesFromArray(models: any[]): string[] {
-    return models
-      .map((entry) => entry?.name || entry?.model || entry?.id)
-      .filter((name): name is string => typeof name === "string" && name.length > 0);
-  }
-
-  private async fetchModelsFromV1(): Promise<string[]> {
-    const response = await this.makeRequest("/v1/models", { method: "GET" }, false);
-    if (!response.ok) {
-      const errorText = await response.text().catch(() => "Failed to parse response");
-      throw new Error(`Ollama /v1/models request failed: ${response.statusText} - ${errorText}`);
-    }
-    const json = await response.json().catch(() => null);
-    const entries = Array.isArray(json?.data) ? json.data : [];
-    const names = this.parseModelNamesFromArray(entries);
-    return names;
-  }
-
-  private async fetchModelsFromTags(): Promise<string[]> {
-    const response = await this.makeRequest("/tags", { method: "GET" }, true);
-    if (!response.ok) {
-      const errorText = await response.text().catch(() => "Failed to parse response");
-      throw new Error(`Ollama /tags request failed: ${response.statusText} - ${errorText}`);
-    }
-    const json = await response.json().catch(() => null);
-    const entries = Array.isArray(json?.models) ? json.models : Array.isArray(json) ? json : [];
-    const names = this.parseModelNamesFromArray(entries);
-    return names;
-  }
+  private ensureApiKey(): string {
+    if (this.config.apiKey) {
+      return this.config.apiKey;
+    }
+    throw new Error("API key is required. Please configure your Ollama API key in settings.");
+  }
+
+  private getHeaders(additional: Record<string, string> = {}) {
+    const headers: Record<string, string> = {
+      ...additional,
+      "x-ollama-api-key": this.ensureApiKey(),
+    };
+    if (this.config.endpoint) {
+      headers["x-ollama-endpoint"] = this.config.endpoint;
+    }
+    return headers;
+  }
+
+  private async parseJsonResponse(response: Response): Promise<any> {
+    const text = await response.text();
+    if (!text) return null;
+    return JSON.parse(text);
+  }
 
   async chatCompletion(
@@ -145,29 +85,29 @@ export class OllamaCloudService {
     stream: boolean = false
   ): Promise<APIResponse<string>> {
     try {
-      const response = await this.makeRequest(
-        "/chat",
-        {
-          method: "POST",
-          body: JSON.stringify({
-            model,
-            messages,
-            stream,
-          }),
-        },
-        true
-      );
+      const response = await fetch(LOCAL_CHAT_URL, {
+        method: "POST",
+        headers: this.getHeaders({ "Content-Type": "application/json" }),
+        body: JSON.stringify({
+          model,
+          messages,
+          stream,
+        }),
+      });
 
       if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
+        const errorBody = await response.text();
+        throw new Error(
+          `Chat completion failed (${response.status}): ${response.statusText} - ${errorBody}`
+        );
       }
 
-      const data = await response.json();
-
-      if (data.message && data.message.content) {
+      const data = await this.parseJsonResponse(response);
+
+      if (data?.message?.content) {
         return { success: true, data: data.message.content };
-      } else if (data.choices && data.choices[0]?.message?.content) {
+      }
+      if (data?.choices?.[0]?.message?.content) {
         return { success: true, data: data.choices[0].message.content };
       }
@@ -183,28 +123,31 @@ export class OllamaCloudService {
   async listModels(): Promise<APIResponse<string[]>> {
     try {
-      const primary = await this.fetchModelsFromV1();
-      if (primary.length > 0) {
-        this.availableModels = primary;
-        return { success: true, data: primary };
-      }
-      const fallback = await this.fetchModelsFromTags();
-      if (fallback.length > 0) {
-        this.availableModels = fallback;
-        return { success: true, data: fallback };
-      }
-      this.availableModels = DEFAULT_MODELS;
-      return { success: true, data: DEFAULT_MODELS };
+      const response = await fetch(LOCAL_MODELS_URL, {
+        headers: this.getHeaders(),
+      });
+      if (!response.ok) {
+        const errorBody = await response.text();
+        throw new Error(`List models failed: ${response.statusText} - ${errorBody}`);
+      }
+      const data = await this.parseJsonResponse(response);
+      const models: string[] = Array.isArray(data?.models) ? data.models : [];
+
+      if (models.length === 0) {
+        this.availableModels = DEFAULT_MODELS;
+        return { success: true, data: DEFAULT_MODELS };
+      }
+
+      this.availableModels = models;
+      return { success: true, data: models };
     } catch (error) {
       console.error("[Ollama] listModels error:", error);
       if (DEFAULT_MODELS.length > 0) {
         this.availableModels = DEFAULT_MODELS;
         return { success: true, data: DEFAULT_MODELS };
       }
       return {
         success: false,
         error: error instanceof Error ? error.message : "Failed to list models",
@@ -215,146 +158,6 @@ export class OllamaCloudService {
   getAvailableModels(): string[] {
     return this.availableModels.length > 0 ? this.availableModels : DEFAULT_MODELS;
   }
 
-  async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert prompt engineer. Your task is to enhance user prompts to make them more precise, actionable, and effective for AI coding agents.
-
-Apply these principles:
-1. Add specific context about project and requirements
-2. Clarify constraints and preferences
-3. Define expected output format clearly
-4. Include edge cases and error handling requirements
-5. Specify testing and validation criteria
-
-Return ONLY the enhanced prompt, no explanations.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Enhance this prompt for an AI coding agent:\n\n${prompt}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generatePRD(idea: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert product manager and technical architect. Generate a comprehensive Product Requirements Document (PRD) based on user's idea.
-
-Structure your PRD with these sections:
-1. Overview & Objectives
-2. User Personas & Use Cases
-3. Functional Requirements (prioritized)
-4. Non-functional Requirements
-5. Technical Architecture Recommendations
-6. Success Metrics & KPIs
-
-Use clear, specific language suitable for development teams.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Generate a PRD for this idea:\n\n${idea}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generateActionPlan(prd: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are an expert technical lead and project manager. Generate a detailed action plan based on PRD.
-
-Structure of action plan with:
-1. Task breakdown with priorities (High/Medium/Low)
-2. Dependencies between tasks
-3. Estimated effort for each task
-4. Recommended frameworks and technologies
-5. Architecture guidelines and best practices
-
-Include specific recommendations for:
-- Frontend frameworks
-- Backend architecture
-- Database choices
-- Authentication/authorization
-- Deployment strategy`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Generate an action plan based on this PRD:\n\n${prd}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
-
-  async generateUXDesignerPrompt(appDescription: string, model?: string): Promise<APIResponse<string>> {
-    const systemMessage: ChatMessage = {
-      role: "system",
-      content: `You are a world-class UX/UI designer with deep expertise in human-centered design principles, user research, interaction design, visual design systems, and modern design tools (Figma, Sketch, Adobe XD).
-
-Your task is to create an exceptional, detailed prompt for generating the best possible UX design for a given app description.
-
-Generate a comprehensive UX design prompt that includes:
-
-1. USER RESEARCH & PERSONAS
-- Primary target users and their motivations
-- User pain points and needs
-- User journey maps
-- Persona archetypes with demographics and goals
-
-2. INFORMATION ARCHITECTURE
-- Content hierarchy and organization
-- Navigation structure and patterns
-- User flows and key pathways
-- Site map or app structure
-
-3. VISUAL DESIGN SYSTEM
-- Color palette recommendations (primary, secondary, accent, neutral)
-- Typography hierarchy and font pairings
-- Component library approach
-- Spacing, sizing, and layout grids
-- Iconography style and set
-
-4. INTERACTION DESIGN
-- Micro-interactions and animations
-- Gesture patterns for touch interfaces
-- Loading states and empty states
-- Error handling and feedback mechanisms
-- Accessibility considerations (WCAG compliance)
-
-5. KEY SCREENS & COMPONENTS
-- Core screens that need detailed design
-- Critical components (buttons, forms, cards, navigation)
-- Data visualization needs
-- Responsive design requirements (mobile, tablet, desktop)
-
-6. DESIGN DELIVERABLES
-- Wireframes vs. high-fidelity mockups
-- Design system documentation needs
-- Prototyping requirements
-- Handoff specifications for developers
-
-7. COMPETITIVE INSIGHTS
-- Design patterns from successful apps in this category
-- Opportunities to differentiate
-- Modern design trends to consider
-
-The output should be a detailed, actionable prompt that a designer or AI image generator can use to create world-class UX designs.
-
-Make the prompt specific, inspiring, and comprehensive. Use professional UX terminology.`,
-    };
-
-    const userMessage: ChatMessage = {
-      role: "user",
-      content: `Create the BEST EVER UX design prompt for this app:\n\n${appDescription}`,
-    };
-
-    return this.chatCompletion([systemMessage, userMessage], model || "gpt-oss:120b");
-  }
 }
 
 export default OllamaCloudService;

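Net effect of this file: the browser never contacts ollama.com directly anymore; every request goes through the same-origin proxy routes, which sidesteps CORS and keeps the upstream Authorization header on the server. A usage sketch, assuming only the constructor and methods shown above (the helper and key variable are illustrative):

// Sketch: the service now targets the local proxy routes.
async function demo(userProvidedKey: string) {
  const ollama = new OllamaCloudService({ apiKey: userProvidedKey });
  const models = await ollama.listModels(); // GET /api/ollama/models
  const reply = await ollama.chatCompletion(
    [{ role: "user", content: "Summarize this repo" }],
    "gpt-oss:120b" // illustrative model
  );
  if (reply.success) console.log(reply.data);
}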

@@ -1,9 +1,18 @@
 import type { ChatMessage, APIResponse } from "@/types";
 
 const DEFAULT_QWEN_ENDPOINT = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
-const DEFAULT_OAUTH_BASE = "/api/qwen";
 const TOKEN_STORAGE_KEY = "promptarch-qwen-tokens";
 
+function getOAuthBaseUrl(): string {
+  if (typeof window !== "undefined") {
+    return `${window.location.origin}/api/qwen`;
+  }
+  if (process.env.NEXT_PUBLIC_SITE_URL) {
+    return `${process.env.NEXT_PUBLIC_SITE_URL}/api/qwen`;
+  }
+  return "/api/qwen";
+}
+
 export interface QwenOAuthConfig {
   apiKey?: string;
   endpoint?: string;
@@ -39,7 +48,7 @@ export class QwenOAuthService {
   constructor(config: QwenOAuthConfig = {}) {
     this.endpoint = config.endpoint || DEFAULT_QWEN_ENDPOINT;
-    this.oauthBaseUrl = config.oauthBaseUrl || DEFAULT_OAUTH_BASE;
+    this.oauthBaseUrl = config.oauthBaseUrl || getOAuthBaseUrl();
     this.apiKey = config.apiKey || process.env.QWEN_API_KEY || undefined;
 
     if (config.accessToken) {
@@ -104,7 +113,7 @@ export class QwenOAuthService {
   }
 
   private hydrateTokens() {
-    if (this.storageHydrated || typeof window === "undefined") {
+    if (this.storageHydrated || typeof window === "undefined" || typeof window.localStorage === "undefined") {
       return;
     }
@@ -113,7 +122,8 @@ export class QwenOAuthService {
       if (stored) {
         this.token = JSON.parse(stored);
       }
-    } catch {
+    } catch (error) {
+      console.warn("[QwenOAuth] Failed to read tokens from localStorage:", error);
       this.token = null;
     } finally {
       this.storageHydrated = true;
@@ -126,14 +136,18 @@ export class QwenOAuthService {
   }
 
   private persistToken(token: QwenOAuthToken | null) {
-    if (typeof window === "undefined") {
+    if (typeof window === "undefined" || typeof window.localStorage === "undefined") {
       return;
     }
 
-    if (token) {
-      window.localStorage.setItem(TOKEN_STORAGE_KEY, JSON.stringify(token));
-    } else {
-      window.localStorage.removeItem(TOKEN_STORAGE_KEY);
+    try {
+      if (token) {
+        window.localStorage.setItem(TOKEN_STORAGE_KEY, JSON.stringify(token));
+      } else {
+        window.localStorage.removeItem(TOKEN_STORAGE_KEY);
+      }
+    } catch (error) {
+      console.warn("[QwenOAuth] Failed to persist tokens to localStorage:", error);
     }
   }
@@ -227,17 +241,23 @@ export class QwenOAuthService {
       throw new Error("Qwen OAuth is only supported in the browser");
     }
 
-    const codeVerifier = this.generateCodeVerifier();
-    const codeChallenge = await this.generateCodeChallenge(codeVerifier);
-    const deviceAuth = await this.requestDeviceAuthorization(codeChallenge);
-
     const popup = window.open(
-      deviceAuth.verification_uri_complete,
+      "",
       "qwen-oauth",
       "width=500,height=600,scrollbars=yes,resizable=yes"
     );
 
-    if (!popup) {
+    const codeVerifier = this.generateCodeVerifier();
+    const codeChallenge = await this.generateCodeChallenge(codeVerifier);
+    const deviceAuth = await this.requestDeviceAuthorization(codeChallenge);
+
+    if (popup) {
+      try {
+        popup.location.href = deviceAuth.verification_uri_complete;
+      } catch {
+        // ignore cross-origin restrictions
+      }
+    } else {
       window.alert(
         `Open this URL to authenticate:\n${deviceAuth.verification_uri_complete}\n\nUser code: ${deviceAuth.user_code}`
       );

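The reordering in the last hunk is the popup-blocker fix: window.open only succeeds in the synchronous part of a user gesture, so the popup is opened blank first and navigated once the async PKCE and device-authorization steps complete. The same pattern in isolation (openAuthPopup and getAuthUrl are hypothetical, for illustration only):

// Sketch: open early, navigate late.
async function openAuthPopup(getAuthUrl: () => Promise<string>): Promise<void> {
  // Must run before any await, i.e. synchronously inside the click handler,
  // or popup blockers will reject the window.
  const popup = window.open("", "oauth-popup", "width=500,height=600");
  const url = await getAuthUrl(); // async work happens after the popup exists
  if (popup) {
    popup.location.href = url; // navigate the already-open window
  } else {
    window.alert(`Open this URL to authenticate:\n${url}`);
  }
}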

@@ -2,5 +2,11 @@
   "buildCommand": "npm run build",
   "outputDirectory": ".next",
   "framework": "nextjs",
-  "devCommand": "npm run dev"
+  "devCommand": "npm run dev",
+  "env": {
+    "NEXT_PUBLIC_SITE_URL": {
+      "description": "The production URL of your app (e.g., https://your-app.vercel.app)",
+      "value": "https://your-app.vercel.app"
+    }
+  }
 }