/*
 * Project overview:
 * - Context Compaction System with token counting and summarization
 * - Deterministic State Machine for flow control (no LLM decisions)
 * - Parallel Execution Engine (up to 12 concurrent sessions)
 * - Event-Driven Coordination via Event Bus
 * - Agent Workspace Isolation (tools, memory, identity, files)
 * - YAML Workflow Integration (OpenClaw/Lobster compatible)
 * - Claude Code integration layer
 * - Complete demo UI with real-time visualization
 * - Comprehensive documentation and README
 *
 * Components:
 * - agent-system/: Context management, token counting, subagent spawning
 * - pipeline-system/: State machine, parallel executor, event bus, workflows
 * - skills/: AI capabilities (LLM, ASR, TTS, VLM, image generation, etc.)
 * - src/app/: Next.js demo application
 *
 * Total: ~100KB of production-ready TypeScript code
 *
 * File: 33 lines, 758 B, TypeScript, executable.
 */
import ZAI, { ChatMessage } from "z-ai-web-dev-sdk";
|
|
|
|
async function main(prompt: string) {
|
|
try {
|
|
const zai = await ZAI.create();
|
|
|
|
const messages: ChatMessage[] = [
|
|
{
|
|
role: "assistant",
|
|
content: "Hi, I'm a helpful assistant."
|
|
},
|
|
{
|
|
role: "user",
|
|
content: prompt,
|
|
},
|
|
];
|
|
|
|
const response = await zai.chat.completions.create({
|
|
messages,
|
|
stream: false,
|
|
thinking: { type: "disabled" },
|
|
});
|
|
|
|
const reply = response.choices?.[0]?.message?.content;
|
|
console.log("Chat reply:");
|
|
console.log(reply ?? JSON.stringify(response, null, 2));
|
|
} catch (err: any) {
|
|
console.error("Chat failed:", err?.message || err);
|
|
}
|
|
}
|
|
|
|
main('What is the capital of France?');
|