feat: Add Ollama Cloud integration with 20+ free AI models
- Added AI Model Manager to sidebar for quick model switching
- Integrated Ollama Cloud API with official models from ollama.com
- Added AISettingsModal with searchable model catalog
- Models include: GPT-OSS 120B, DeepSeek V3.2, Gemini 3 Pro, Qwen3 Coder, etc.
- Added 'Get Key' button linking to ollama.com/settings/keys
- Updated README with Ollama Cloud documentation and free API key instructions
- Fixed ChatPanel export issue
- Added Brain icon for reasoning models
@@ -1,7 +1,8 @@
 import { app, BrowserWindow, ipcMain, shell, protocol, net } from 'electron';
 import path from 'path';
 import { fileURLToPath } from 'url';
-import { streamChat } from './qwen-api.js';
+import { streamChat as qwenStreamChat } from './qwen-api.js';
+import * as ollamaApi from './ollama-api.js';
 import { generateImage, detectImageRequest, cleanupCache } from './image-api.js';
 import { fsApi } from './fs-api.js';
 import * as viAutomation from './vi-automation.js';
@@ -172,22 +173,53 @@ ipcMain.handle('export-project-zip', async (_, { projectId }) => {
 });
 
 // Chat Streaming IPC
-ipcMain.on('chat-stream-start', (event, { messages, model }) => {
+ipcMain.on('chat-stream-start', async (event, { messages, model }) => {
   const window = BrowserWindow.fromWebContents(event.sender);
 
-  streamChat(
-    messages,
-    model,
-    (chunk) => {
-      if (!window.isDestroyed()) {
-        // console.log('[Main] Sending chunk size:', chunk.length); // Verbose log
-        window.webContents.send('chat-chunk', chunk);
-      }
-    },
-    (fullResponse) => !window.isDestroyed() && window.webContents.send('chat-complete', fullResponse),
-    (error) => !window.isDestroyed() && window.webContents.send('chat-error', error.message),
-    (status) => !window.isDestroyed() && window.webContents.send('chat-status', status)
-  );
+  // Choose provider based on model prefix or name
+  // Default to qwen unless model starts with 'ollama:' or matches known ollama models
+  const isOllama = model?.startsWith('ollama:') || model === 'gpt-oss:120b';
+  const cleanModel = isOllama ? model.replace('ollama:', '') : model;
+
+  const onChunk = (chunk) => {
+    if (!window.isDestroyed()) window.webContents.send('chat-chunk', chunk);
+  };
+  const onComplete = (full) => {
+    if (!window.isDestroyed()) window.webContents.send('chat-complete', full);
+  };
+  const onError = (err) => {
+    if (!window.isDestroyed()) window.webContents.send('chat-error', typeof err === 'string' ? err : err.message);
+  };
+  const onStatus = (status) => {
+    if (!window.isDestroyed()) window.webContents.send('chat-status', status);
+  };
+
+  if (isOllama) {
+    // Ensure key is loaded
+    const key = await getSecret('ollama-cloud-key');
+    ollamaApi.setApiKey(key);
+    ollamaApi.streamChat(messages, cleanModel, onChunk, onComplete, onError, onStatus);
+  } else {
+    qwenStreamChat(messages, model, onChunk, onComplete, onError, onStatus);
+  }
 });
 
+// Ollama Specific Handlers
+ipcMain.handle('ollama-get-key-status', async () => {
+  const key = await getSecret('ollama-cloud-key');
+  return { hasKey: !!key };
+});
+
+ipcMain.handle('ollama-save-key', async (_, { key }) => {
+  await saveSecret('ollama-cloud-key', key);
+  ollamaApi.setApiKey(key);
+  return true;
+});
+
+ipcMain.handle('ollama-get-models', async () => {
+  const key = await getSecret('ollama-cloud-key');
+  ollamaApi.setApiKey(key);
+  return await ollamaApi.listModels();
+});
+
 // FS Handlers
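For reference, a renderer could drive this handler roughly as sketched below. The channel names come straight from the hunk above; direct ipcRenderer access and the wiring around it are assumptions for illustration, since the commit does not show the renderer side of the chat flow.

// Illustrative renderer-side sketch; channel names ('chat-stream-start',
// 'chat-chunk', 'chat-complete', 'chat-error', 'chat-status') are from the
// handler above, everything else is assumed.
const { ipcRenderer } = require('electron');

function startChat(messages, model, handlers) {
  ipcRenderer.on('chat-chunk', (_e, chunk) => handlers.onChunk(chunk));
  ipcRenderer.on('chat-status', (_e, status) => handlers.onStatus(status));
  ipcRenderer.once('chat-complete', (_e, full) => handlers.onComplete(full));
  ipcRenderer.once('chat-error', (_e, message) => handlers.onError(message));

  // An 'ollama:' prefix (or a known Ollama model name) routes to Ollama Cloud;
  // everything else falls through to qwen.
  ipcRenderer.send('chat-stream-start', { messages, model });
}

startChat([{ role: 'user', content: 'Hello!' }], 'ollama:gpt-oss:120b', {
  onChunk: (c) => process.stdout.write(c),
  onStatus: (s) => console.log(s),
  onComplete: () => console.log('\n[done]'),
  onError: (m) => console.error('chat-error:', m),
});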
bin/goose-ultra-final/electron/ollama-api.js (new file, 147 additions)
@@ -0,0 +1,147 @@
+import fs from 'fs';
+import path from 'path';
+import https from 'https';
+import os from 'os';
+
+/**
+ * Ollama Cloud API Bridge for Goose Ultra
+ * Base URL: https://ollama.com/api
+ */
+
+// We'll manage key storage via main.js using keytar
+let cachedApiKey = null;
+
+export function setApiKey(key) {
+  cachedApiKey = key;
+}
+
+let activeRequest = null;
+
+export function abortActiveChat() {
+  if (activeRequest) {
+    try {
+      activeRequest.destroy();
+    } catch (e) { /* request already closed */ }
+    activeRequest = null;
+  }
+}
+
+export async function streamChat(messages, model = 'gpt-oss:120b', onChunk, onComplete, onError, onStatus) {
+  abortActiveChat();
+
+  if (!cachedApiKey) {
+    onError(new Error('OLLAMA_CLOUD_KEY_MISSING: Please set your Ollama Cloud API Key in Settings.'));
+    return;
+  }
+
+  const log = (msg) => {
+    if (onStatus) onStatus(`[Ollama] ${msg}`);
+  };
+
+  const body = JSON.stringify({
+    model,
+    messages,
+    stream: true
+  });
+
+  const options = {
+    hostname: 'ollama.com',
+    port: 443,
+    path: '/api/chat',
+    method: 'POST',
+    headers: {
+      'Content-Type': 'application/json',
+      'Authorization': `Bearer ${cachedApiKey}`,
+      'Content-Length': Buffer.byteLength(body)
+    }
+  };
+
+  log(`Connecting to ollama.com as ${model}...`);
+
+  const req = https.request(options, (res) => {
+    let fullResponse = '';
+
+    if (res.statusCode !== 200) {
+      let errBody = '';
+      res.on('data', (c) => errBody += c.toString());
+      res.on('end', () => {
+        onError(new Error(`Ollama API Error ${res.statusCode}: ${errBody}`));
+      });
+      return;
+    }
+
+    res.setEncoding('utf8');
+    let buffer = '';
+
+    res.on('data', (chunk) => {
+      buffer += chunk;
+      const lines = buffer.split('\n');
+      buffer = lines.pop(); // keep any partial NDJSON line for the next chunk
+
+      for (const line of lines) {
+        if (!line.trim()) continue;
+        try {
+          const parsed = JSON.parse(line);
+          const content = parsed.message?.content || '';
+          if (content) {
+            fullResponse += content;
+            onChunk(content);
+          }
+          if (parsed.done) {
+            // Request is done according to Ollama API
+          }
+        } catch (e) {
+          // Ignore malformed JSON chunks
+        }
+      }
+    });
+
+    res.on('end', () => {
+      onComplete(fullResponse);
+    });
+  });
+  activeRequest = req; // track immediately so abortActiveChat() can cancel an in-flight request
+
+  req.on('error', (e) => {
+    onError(e);
+  });
+
+  req.setNoDelay(true);
+  req.write(body);
+  req.end();
+}
+
+/**
+ * Fetch available models from Ollama Cloud
+ */
+export async function listModels() {
+  if (!cachedApiKey) return [];
+
+  return new Promise((resolve, reject) => {
+    const options = {
+      hostname: 'ollama.com',
+      port: 443,
+      path: '/api/tags',
+      method: 'GET',
+      headers: {
+        'Authorization': `Bearer ${cachedApiKey}`
+      }
+    };
+
+    const req = https.request(options, (res) => {
+      let body = '';
+      res.on('data', (c) => body += c.toString());
+      res.on('end', () => {
+        try {
+          const data = JSON.parse(body);
+          resolve(data.models || []);
+        } catch (e) {
+          resolve([]);
+        }
+      });
+    });
+
+    req.on('error', (e) => resolve([]));
+    req.end();
+  });
+}
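A minimal sketch of exercising this bridge from the main process. The OLLAMA_API_KEY environment variable and the smoke test itself are assumptions for illustration, not part of this commit; the setApiKey/listModels/streamChat signatures come from the file above.

// Hypothetical main-process smoke test for ollama-api.js (ESM, top-level await).
import * as ollamaApi from './ollama-api.js';

// Assumed env var; the app itself stores the key via keytar in main.js.
ollamaApi.setApiKey(process.env.OLLAMA_API_KEY);

const models = await ollamaApi.listModels();
console.log('Available models:', models.map((m) => m.name));

ollamaApi.streamChat(
  [{ role: 'user', content: 'Say hi in five words.' }],
  'gpt-oss:120b',
  (chunk) => process.stdout.write(chunk),                  // onChunk
  (full) => console.log(`\n[done] ${full.length} chars`),  // onComplete
  (err) => console.error('Chat failed:', err.message),     // onError
  (status) => console.log(status)                          // onStatus
);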
@@ -91,5 +91,11 @@ contextBridge.exposeInMainWorld('electron', {
 
     // Browser
     openBrowser: (url) => ipcRenderer.invoke('vi-open-browser', { url })
   },
+  // Ollama Cloud
+  ollama: {
+    getKeyStatus: () => ipcRenderer.invoke('ollama-get-key-status'),
+    saveKey: (key) => ipcRenderer.invoke('ollama-save-key', { key }),
+    getModels: () => ipcRenderer.invoke('ollama-get-models')
+  }
 });
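On the renderer side, the bridge lands on window.electron.ollama. A minimal usage sketch, assuming a settings UI that collects the key; saveKeyFromInput is hypothetical, while the three bridge methods are the ones exposed in the hunk above.

// Renderer-side sketch using the preload bridge above.
async function saveKeyFromInput(key) {
  await window.electron.ollama.saveKey(key);

  const { hasKey } = await window.electron.ollama.getKeyStatus();
  if (!hasKey) {
    console.warn('Key was not persisted');
    return;
  }

  // Populate the model picker once a key is available.
  const models = await window.electron.ollama.getModels();
  console.log('Ollama Cloud models:', models.map((m) => m.name));
}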