feat: complete overhaul of AI Assist with premium WOW level UI and stable preview engine
@@ -269,6 +269,35 @@ export class ModelAdapter {
    return this.callWithFallback((service) => service.generateAIAssist(options, model), providers);
  }

  async generateAIAssistStream(
    options: {
      messages: AIAssistMessage[];
      currentAgent: string;
      onChunk: (chunk: string) => void;
      signal?: AbortSignal;
    },
    provider?: ModelProvider,
    model?: string
  ): Promise<APIResponse<void>> {
    const fallback = this.buildFallbackProviders(this.preferredProvider, "qwen", "ollama", "zai");
    const providers: ModelProvider[] = provider ? [provider] : fallback;

    // Streaming does not yet retry across fallback providers; just use the first available one.
    const activeProvider = providers[0];
    let service: any;
    switch (activeProvider) {
      case "qwen": service = this.qwenService; break;
      case "ollama": service = this.ollamaService; break;
      case "zai": service = this.zaiService; break;
    }

    if (!service || !service.generateAIAssistStream) {
      return { success: false, error: "Streaming not supported for this provider" };
    }

    return await service.generateAIAssistStream(options, model);
  }

  async chatCompletion(
    messages: ChatMessage[],
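The adapter resolves a concrete provider service and delegates to it. For context, a minimal sketch of how a caller might drive this method; the adapter instance, the message payload, and the renderPartial helper are hypothetical, not part of this diff:

// Hypothetical caller: accumulate streamed deltas and support cancellation.
const controller = new AbortController();
let transcript = "";

const result = await adapter.generateAIAssistStream({
  messages: [{ role: "user", content: "Build a pricing card" }],
  currentAgent: "design",
  onChunk: (chunk) => {
    transcript += chunk;        // append each delta as it arrives
    renderPartial(transcript);  // hypothetical UI refresh
  },
  signal: controller.signal,    // controller.abort() cancels the underlying fetch
});

if (!result.success) console.warn(result.error);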
@@ -730,6 +730,97 @@ Perform a DEEP 360° competitive intelligence analysis and generate 5-7 strategi

    return await this.chatCompletion(chatMessages, model || this.getAvailableModels()[0]);
  }

  async generateAIAssistStream(
    options: {
      messages: AIAssistMessage[];
      currentAgent: string;
      onChunk: (chunk: string) => void;
      signal?: AbortSignal;
    },
    model?: string
  ): Promise<APIResponse<void>> {
    try {
      // ... existing prompt logic ...
      const systemPrompt = `You are "AI Assist", the master orchestrator.
Your goal is to provide intelligent conversational support and switch to specialized agents.

CANVAS MODE (CRITICAL):
When the user asks to "build", "design", "create", or "write code", you MUST use the [PREVIEW] tag.
Inside [PREVIEW], output ONLY the actual functional code (HTML/Tailwind, JavaScript, etc.).
Do NOT explain what the code does inside the bubble if you are generating a preview.
The user wants to see it WORKING in the Canvas immediately.

STRICT OUTPUT FORMAT:
[AGENT:id] - Optional: switch to content, seo, smm, pm, code, design, web, app.
[PREVIEW:type:language]
ACTUAL_FUNCTIONAL_CODE_OR_DATA
[/PREVIEW]
Optional conversational text (keep it brief).

Example for a mockup:
[AGENT:design]
[PREVIEW:design:html]
<div class="bg-blue-500 p-10">...</div>
[/PREVIEW]`;

      const messages: ChatMessage[] = [
        { role: "system", content: systemPrompt },
        ...options.messages.map(m => ({
          role: m.role as "user" | "assistant" | "system",
          content: m.content
        }))
      ];

      const response = await fetch(LOCAL_CHAT_URL, {
        method: "POST",
        headers: this.getHeaders({ "Content-Type": "application/json" }),
        signal: options.signal,
        body: JSON.stringify({
          model: model || this.getAvailableModels()[0],
          messages,
          stream: true,
        }),
      });

      if (!response.ok) {
        throw new Error("Stream request failed");
      }

      const reader = response.body?.getReader();
      if (!reader) throw new Error("No reader");

      const decoder = new TextDecoder();
      let buffer = "";

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        const chunk = decoder.decode(value, { stream: true });
        buffer += chunk;

        const lines = buffer.split("\n");
        buffer = lines.pop() || "";

        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            const data = JSON.parse(line);
            if (data.message?.content) {
              options.onChunk(data.message.content);
            }
          } catch (e) {
            console.error("Error parsing stream line", e);
          }
        }
      }

      return { success: true, data: undefined };
    } catch (error) {
      return { success: false, error: error instanceof Error ? error.message : "Stream failed" };
    }
  }
}

export default OllamaCloudService;
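This branch assumes Ollama-style NDJSON streaming: one JSON object per line, each carrying a message.content delta, which is why the trailing fragment of every network chunk is buffered until the next read. A standalone sketch of that buffering behavior with simulated chunks (the payloads are illustrative, not captured output):

// A chunk boundary can fall mid-object; the buffer stitches it back together.
const chunks = [
  '{"message":{"content":"Hel"},"done":false}\n{"message":{"con',
  'tent":"lo"},"done":true}\n',
];

let buffer = "";
for (const chunk of chunks) {
  buffer += chunk;
  const lines = buffer.split("\n");
  buffer = lines.pop() || ""; // hold the incomplete tail for the next read
  for (const line of lines) {
    if (!line.trim()) continue;
    console.log(JSON.parse(line).message.content); // logs "Hel", then "lo"
  }
}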
@@ -1006,6 +1006,107 @@ Perform analysis based on provided instructions.`,

    return await this.chatCompletion(chatMessages, model || this.getAvailableModels()[0]);
  }

  async generateAIAssistStream(
    options: {
      messages: AIAssistMessage[];
      currentAgent: string;
      onChunk: (chunk: string) => void;
      signal?: AbortSignal;
    },
    model?: string
  ): Promise<APIResponse<void>> {
    try {
      // ... existing prompt logic ...
      const systemPrompt = `You are "AI Assist".
Your goal is to provide intelligent support with a "Canvas" experience.

CANVAS MODE (CRITICAL):
When building or designing, you MUST use the [PREVIEW] tag.
Inside [PREVIEW], output ONLY the actual code (HTML/Tailwind etc).
The user wants to see it WORKING in the Canvas immediately.

STRICT OUTPUT FORMAT:
[AGENT:id] - Optional: content, seo, smm, pm, code, design, web, app.
[PREVIEW:type:language]
ACTUAL_FUNCTIONAL_CODE
[/PREVIEW]
Optional brief text.`;

      const messages: ChatMessage[] = [
        { role: "system", content: systemPrompt },
        ...options.messages.map(m => ({
          role: m.role as "user" | "assistant" | "system",
          content: m.content
        }))
      ];

      const endpoint = "/tools/promptarch/api/qwen/chat";
      const headers: Record<string, string> = {
        "Content-Type": "application/json",
      };

      const tokenInfo = this.getTokenInfo();
      if (tokenInfo?.accessToken) {
        headers["Authorization"] = `Bearer ${tokenInfo.accessToken}`;
      } else if (this.apiKey) {
        headers["Authorization"] = `Bearer ${this.apiKey}`;
      }

      const response = await fetch(endpoint, {
        method: "POST",
        headers,
        signal: options.signal,
        body: JSON.stringify({
          model: model || this.getAvailableModels()[0],
          messages,
          stream: true,
        }),
      });

      if (!response.ok) {
        throw new Error("Stream request failed");
      }

      const reader = response.body?.getReader();
      if (!reader) throw new Error("No reader");

      const decoder = new TextDecoder();
      let buffer = "";

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        const chunk = decoder.decode(value, { stream: true });
        buffer += chunk;

        const lines = buffer.split("\n");
        buffer = lines.pop() || "";

        for (const line of lines) {
          const trimmedLine = line.trim();
          if (!trimmedLine || !trimmedLine.startsWith("data:")) continue;

          const dataStr = trimmedLine.replace(/^data:\s*/, "");
          if (dataStr === "[DONE]") break;

          try {
            const data = JSON.parse(dataStr);
            if (data.choices?.[0]?.delta?.content) {
              options.onChunk(data.choices[0].delta.content);
            }
          } catch (e) {
            // Ignore parse errors for incomplete lines
          }
        }
      }

      return { success: true, data: undefined };
    } catch (error) {
      return { success: false, error: error instanceof Error ? error.message : "Stream failed" };
    }
  }

  async listModels(): Promise<APIResponse<string[]>> {
    const models = [
      "coder-model",
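The system prompts in these hunks define a small tag protocol ([AGENT:id] to switch agents, [PREVIEW:type:language] ... [/PREVIEW] wrapping the Canvas payload) that the client has to parse back out of the streamed text. The parsing side is not part of this diff; a minimal hypothetical extractor over the accumulated transcript might look like:

// Hypothetical client-side parser for the [AGENT]/[PREVIEW] tag protocol.
interface ParsedAssist {
  agent?: string;                                  // from [AGENT:id]
  preview?: { type: string; language: string; code: string };
  text: string;                                    // remaining conversational text
}

function parseAssistOutput(raw: string): ParsedAssist {
  const agent = raw.match(/\[AGENT:(\w+)\]/)?.[1];
  const preview = raw.match(/\[PREVIEW:(\w+):(\w+)\]([\s\S]*?)\[\/PREVIEW\]/);
  const text = raw
    .replace(/\[AGENT:\w+\]/g, "")
    .replace(/\[PREVIEW:\w+:\w+\][\s\S]*?\[\/PREVIEW\]/g, "")
    .trim();
  return {
    agent,
    preview: preview
      ? { type: preview[1], language: preview[2], code: preview[3].trim() }
      : undefined,
    text,
  };
}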
@@ -799,6 +799,88 @@ MISSION: Perform a DEEP 360° competitive intelligence analysis and generate 5-7

    return await this.chatCompletion(chatMessages, model || this.getAvailableModels()[0]);
  }

  async generateAIAssistStream(
    options: {
      messages: AIAssistMessage[];
      currentAgent: string;
      onChunk: (chunk: string) => void;
      signal?: AbortSignal;
    },
    model?: string
  ): Promise<APIResponse<void>> {
    try {
      if (!this.config.apiKey) {
        throw new Error("API key is required.");
      }

      // ... existing prompt logic ...
      const systemPrompt = `You are "AI Assist".
Your goal is to provide a "Canvas" experience.

CANVAS MODE (CRITICAL):
When building or designing, you MUST use the [PREVIEW] tag.
Inside [PREVIEW], output ONLY the actual code (HTML/Tailwind etc).
The user wants to see it WORKING in the Canvas immediately.

STRICT OUTPUT FORMAT:
[AGENT:id] - Optional switch.
[PREVIEW:type:language]
ACTUAL_FUNCTIONAL_CODE
[/PREVIEW]
Optional brief text.`;

      const messages: ChatMessage[] = [
        { role: "system", content: systemPrompt },
        ...options.messages.map(m => ({
          role: m.role as "user" | "assistant" | "system",
          content: m.content
        }))
      ];

      const endpoint = this.config.codingEndpoint; // AI Assist often involves coding
      const response = await fetch(`${endpoint}/chat/completions`, {
        method: "POST",
        headers: this.getHeaders(),
        signal: options.signal,
        body: JSON.stringify({
          model: model || this.getAvailableModels()[0],
          messages,
          stream: true,
        }),
      });

      if (!response.ok) {
        throw new Error(`Stream failed: ${response.statusText}`);
      }

      const reader = response.body?.getReader();
      if (!reader) throw new Error("No reader");

      const decoder = new TextDecoder();
      // Buffer partial lines: a network chunk can end mid-line, so keep the
      // trailing fragment and prepend it to the next chunk (same approach as
      // the Ollama and Qwen streams above).
      let buffer = "";

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";

        for (const line of lines) {
          if (!line.trim() || !line.startsWith("data:")) continue;
          const dataStr = line.replace(/^data:\s*/, "");
          if (dataStr === "[DONE]") break;
          try {
            const data = JSON.parse(dataStr);
            const content = data.choices?.[0]?.delta?.content || data.output?.choices?.[0]?.delta?.content;
            if (content) options.onChunk(content);
          } catch (e) {
            // Ignore parse errors for incomplete lines
          }
        }
      }

      return { success: true, data: undefined };
    } catch (error) {
      return { success: false, error: error instanceof Error ? error.message : "Stream failed" };
    }
  }
}

export default ZaiPlanService;
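All three providers now duplicate the read/decode/split loop, differing only in line framing (Ollama's NDJSON vs. the SSE "data:" lines used by the Qwen and zai endpoints) and in where the delta lives in the payload. A possible follow-up, not part of this commit, would be one shared helper parameterized by a provider-specific delta extractor:

// Hypothetical shared helper: stream lines out of a fetch body and forward
// whatever delta the provider-specific extractor finds.
async function pumpStreamLines(
  body: ReadableStream<Uint8Array>,
  extractDelta: (line: string) => string | undefined,
  onChunk: (chunk: string) => void
): Promise<void> {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() || "";
    for (const line of lines) {
      const delta = extractDelta(line);
      if (delta) onChunk(delta);
    }
  }
}

// Each service would then supply only its framing, e.g. for the SSE providers:
const sseDelta = (line: string): string | undefined => {
  const trimmed = line.trim();
  if (!trimmed.startsWith("data:")) return undefined;
  const dataStr = trimmed.replace(/^data:\s*/, "");
  if (dataStr === "[DONE]") return undefined;
  try {
    return JSON.parse(dataStr).choices?.[0]?.delta?.content;
  } catch {
    return undefined; // incomplete or non-JSON line
  }
};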