Fix undefined-endpoint error in Ollama-cloud.ts getBaseUrl

Author: Gemini AI
2025-12-25 23:41:41 +04:00
parent 07dbe552f7
commit a1a30e66fe
2 changed files with 152 additions and 103 deletions
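
Judging from the diff, the error came from building request URLs against an undefined this.config.endpoint. The rewrite routes every request through getBaseUrl() and ensureApiPath(), which fall back to a hard-coded default host. A minimal standalone sketch of the new behavior (the helper logic mirrors the diff below; the example hosts and console harness are illustrative only):

const DEFAULT_OLLAMA_ENDPOINT = "https://ollama.com";

function getBaseUrl(endpoint?: string): string {
  // An undefined endpoint now falls back to the default host instead of
  // producing "undefined/chat"-style URLs.
  return (endpoint || DEFAULT_OLLAMA_ENDPOINT).replace(/\/$/, "");
}

function ensureApiPath(path: string): string {
  if (path.startsWith("/api")) return path;
  const normalized = path.startsWith("/") ? path : `/${path}`;
  return `/api${normalized}`;
}

console.log(getBaseUrl(undefined) + ensureApiPath("chat"));
// -> https://ollama.com/api/chat
console.log(getBaseUrl("https://my-proxy.example/") + ensureApiPath("/tags"));
// -> https://my-proxy.example/api/tags (trailing slash trimmed, /api added once)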

Ollama-cloud.ts

@@ -5,133 +5,8 @@ export interface OllamaCloudConfig {
   endpoint?: string;
 }
-export interface OllamaModel {
-  name: string;
-  size?: number;
-  digest?: string;
-}
-export class OllamaCloudService {
-  private config: OllamaCloudConfig;
-  private availableModels: string[] = [];
-  constructor(config: OllamaCloudConfig = {}) {
-    this.config = {
-      endpoint: config.endpoint || "https://ollama.com/api",
-      apiKey: config.apiKey || process.env.OLLAMA_API_KEY,
-    };
-  }
-  private getHeaders(): Record<string, string> {
-    const headers: Record<string, string> = {
-      "Content-Type": "application/json",
-    };
-    if (this.config.apiKey) {
-      headers["Authorization"] = `Bearer ${this.config.apiKey}`;
-    }
-    return headers;
-  }
-  async chatCompletion(
-    messages: ChatMessage[],
-    model: string = "gpt-oss:120b",
-    stream: boolean = false
-  ): Promise<APIResponse<string>> {
-    try {
-      if (!this.config.apiKey) {
-        throw new Error("API key is required. Please configure your Ollama API key in settings.");
-      }
-      console.log("[Ollama] API call:", { endpoint: this.config.endpoint, model, messages });
-      const response = await fetch(`${this.config.endpoint}/chat`, {
-        method: "POST",
-        headers: this.getHeaders(),
-        body: JSON.stringify({
-          model,
-          messages,
-          stream,
-        }),
-      });
-      console.log("[Ollama] Response status:", response.status, response.statusText);
-      if (!response.ok) {
-        const errorText = await response.text();
-        console.error("[Ollama] Error response:", errorText);
-        throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
-      }
-      const data = await response.json();
-      console.log("[Ollama] Response data:", data);
-      if (data.message && data.message.content) {
-        return { success: true, data: data.message.content };
-      } else if (data.choices && data.choices[0]) {
-        return { success: true, data: data.choices[0].message.content };
-      } else {
-        return { success: false, error: "Unexpected response format" };
-      }
-    } catch (error) {
-      console.error("[Ollama] Chat completion error:", error);
-      return {
-        success: false,
-        error: error instanceof Error ? error.message : "Chat completion failed",
-      };
-    }
-  }
-  async listModels(): Promise<APIResponse<string[]>> {
-    try {
-      if (this.config.apiKey) {
-        console.log("[Ollama] Listing models from:", `${this.config.endpoint}/tags`);
-        const response = await fetch(`${this.config.endpoint}/tags`, {
-          headers: this.getHeaders(),
-        });
-        console.log("[Ollama] List models response status:", response.status, response.statusText);
-        if (!response.ok) {
-          throw new Error(`Failed to list models: ${response.statusText}`);
-        }
-        const data = await response.json();
-        console.log("[Ollama] Models data:", data);
-        let models: string[] = [];
-        if (Array.isArray(data.models)) {
-          models = data.models.map((m: OllamaModel) => m.name);
-        } else if (Array.isArray(data)) {
-          models = data.map((m: OllamaModel) => m.name);
-        } else if (data.model) {
-          models = [data.model.name];
-        }
-        this.availableModels = models;
-        return { success: true, data: models };
-      } else {
-        console.log("[Ollama] No API key, using fallback models");
-        return { success: true, data: ["gpt-oss:120b", "llama3.1", "gemma3", "deepseek-r1", "qwen3"] };
-      }
-    } catch (error) {
-      console.error("[Ollama] listModels error:", error);
-      return {
-        success: false,
-        error: error instanceof Error ? error.message : "Failed to list models",
-      };
-    }
-  }
-  getAvailableModels(): string[] {
-    if (this.availableModels.length > 0) {
-      return this.availableModels;
-    }
-    return [
+const DEFAULT_OLLAMA_ENDPOINT = "https://ollama.com";
+const DEFAULT_MODELS = [
   "gpt-oss:120b",
   "llama3.1:latest",
   "llama3.1:70b",
@@ -165,6 +40,180 @@ export class OllamaCloudService {
"yi:34b", "yi:34b",
"yi:9b", "yi:9b",
]; ];
export class OllamaCloudService {
private config: OllamaCloudConfig;
private availableModels: string[] = [];
constructor(config: OllamaCloudConfig = {}) {
this.config = {
endpoint: config.endpoint || DEFAULT_OLLAMA_ENDPOINT,
apiKey: config.apiKey || process.env.OLLAMA_API_KEY,
};
}
private getBaseUrl(): string {
const endpoint = this.config.endpoint || DEFAULT_OLLAMA_ENDPOINT;
return endpoint.replace(/\/$/, "");
}
private ensureApiPath(path: string): string {
if (path.startsWith("/api")) {
return path;
}
const normalized = path.startsWith("/") ? path : `/${path}`;
return `/api${normalized}`;
}
private getHeaders(extra: Record<string, string> = {}): Record<string, string> {
const headers: Record<string, string> = {
"Content-Type": "application/json",
...extra,
};
if (this.config.apiKey) {
headers["Authorization"] = `Bearer ${this.config.apiKey}`;
}
return headers;
}
private async makeRequest(
path: string,
options: RequestInit = {},
useApiPrefix: boolean = true,
timeoutMs: number = 120_000
): Promise<Response> {
const url =
this.getBaseUrl() +
(useApiPrefix ? this.ensureApiPath(path) : (path.startsWith("/") ? path : `/${path}`));
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutMs);
const headers = {
...this.getHeaders(),
...(options.headers || {}),
};
try {
return await fetch(url, {
...options,
headers,
signal: controller.signal,
});
} finally {
clearTimeout(timeout);
}
}
private parseModelNamesFromArray(models: any[]): string[] {
return models
.map((entry) => entry?.name || entry?.model || entry?.id)
.filter((name): name is string => typeof name === "string" && name.length > 0);
}
private async fetchModelsFromV1(): Promise<string[]> {
const response = await this.makeRequest("/v1/models", { method: "GET" }, false);
if (!response.ok) {
const errorText = await response.text().catch(() => "Failed to parse response");
throw new Error(`Ollama /v1/models request failed: ${response.statusText} - ${errorText}`);
}
const json = await response.json().catch(() => null);
const entries = Array.isArray(json?.data) ? json.data : [];
const names = this.parseModelNamesFromArray(entries);
return names;
}
private async fetchModelsFromTags(): Promise<string[]> {
const response = await this.makeRequest("/tags", { method: "GET" }, true);
if (!response.ok) {
const errorText = await response.text().catch(() => "Failed to parse response");
throw new Error(`Ollama /tags request failed: ${response.statusText} - ${errorText}`);
}
const json = await response.json().catch(() => null);
const entries = Array.isArray(json?.models) ? json.models : Array.isArray(json) ? json : [];
const names = this.parseModelNamesFromArray(entries);
return names;
}
async chatCompletion(
messages: ChatMessage[],
model: string = "gpt-oss:120b",
stream: boolean = false
): Promise<APIResponse<string>> {
try {
const response = await this.makeRequest(
"/chat",
{
method: "POST",
body: JSON.stringify({
model,
messages,
stream,
}),
},
true
);
if (!response.ok) {
const errorText = await response.text();
throw new Error(`Chat completion failed (${response.status}): ${response.statusText} - ${errorText}`);
}
const data = await response.json();
if (data.message && data.message.content) {
return { success: true, data: data.message.content };
} else if (data.choices && data.choices[0]?.message?.content) {
return { success: true, data: data.choices[0].message.content };
}
return { success: false, error: "Unexpected response format" };
} catch (error) {
console.error("[Ollama] Chat completion error:", error);
return {
success: false,
error: error instanceof Error ? error.message : "Chat completion failed",
};
}
}
async listModels(): Promise<APIResponse<string[]>> {
try {
const primary = await this.fetchModelsFromV1();
if (primary.length > 0) {
this.availableModels = primary;
return { success: true, data: primary };
}
const fallback = await this.fetchModelsFromTags();
if (fallback.length > 0) {
this.availableModels = fallback;
return { success: true, data: fallback };
}
this.availableModels = DEFAULT_MODELS;
return { success: true, data: DEFAULT_MODELS };
} catch (error) {
console.error("[Ollama] listModels error:", error);
if (DEFAULT_MODELS.length > 0) {
this.availableModels = DEFAULT_MODELS;
return { success: true, data: DEFAULT_MODELS };
}
return {
success: false,
error: error instanceof Error ? error.message : "Failed to list models",
};
}
}
getAvailableModels(): string[] {
return this.availableModels.length > 0 ? this.availableModels : DEFAULT_MODELS;
} }
async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> { async enhancePrompt(prompt: string, model?: string): Promise<APIResponse<string>> {
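
For reference, a hedged usage sketch of the reworked service. It assumes an async context and that OllamaCloudService and a { role, content }-shaped ChatMessage are exported from this module; none of this is part of the commit itself.

// Usage sketch, not part of the diff.
const ollama = new OllamaCloudService({ apiKey: process.env.OLLAMA_API_KEY });

// listModels() now degrades in three steps: /v1/models, then /api/tags,
// then the static DEFAULT_MODELS list, so it returns success even offline.
const models = await ollama.listModels();
console.log(models.success ? models.data : models.error);

// chatCompletion() goes through makeRequest(), which joins getBaseUrl() with
// ensureApiPath("/chat") and aborts the fetch after the 120s default timeout.
const reply = await ollama.chatCompletion([{ role: "user", content: "Say hi" }]);
console.log(reply.success ? reply.data : reply.error);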


@@ -182,11 +182,11 @@ export class QwenOAuthService {
         return refreshed;
       } catch (error) {
         console.error("Qwen token refresh failed", error);
-        this.clearTokens();
+        this.setOAuthTokens(undefined);
         return null;
       }
     }
-    this.clearTokens();
+    this.setOAuthTokens(undefined);
     return null;
   }
@@ -197,13 +197,13 @@ export class QwenOAuthService {
    * Sign out the OAuth session.
    */
   signOut(): void {
-    this.clearTokens();
+    this.setOAuthTokens(undefined);
   }
   /**
    * Stores OAuth tokens locally.
    */
-  setOAuthTokens(tokens?: QwenOAuthToken) {
+  setOAuthTokens(tokens?: QwenOAuthToken | null) {
     if (!tokens) {
       this.token = null;
       this.persistToken(null);
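
The second file funnels the removed clearTokens() call sites through setOAuthTokens(undefined) so there is a single clearing path. A minimal sketch of that pattern; the QwenOAuthToken fields and the persistToken body here are stand-ins, not the real implementation:

// Sketch only: token shape and persistence are assumed/stubbed.
type QwenOAuthToken = { access_token: string; refresh_token?: string };

class QwenOAuthSketch {
  private token: QwenOAuthToken | null = null;

  // Accepting undefined/null gives callers one way to clear state.
  setOAuthTokens(tokens?: QwenOAuthToken | null) {
    if (!tokens) {
      this.token = null;
      this.persistToken(null); // memory and storage cleared in one place
      return;
    }
    this.token = tokens;
    this.persistToken(tokens);
  }

  signOut(): void {
    this.setOAuthTokens(undefined); // replaces the old clearTokens()
  }

  private persistToken(_token: QwenOAuthToken | null): void {
    // storage write intentionally stubbed for this sketch
  }
}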