feat: fully enable self-correction loops

- Import withSelfCorrection from self-correction.js
- Wrap chatWithAI() with self-correction wrapper
- Add /selfcorrection command to show status
- Update /start to mention self-correction and streaming
- Self-correction: 2 retries + exponential backoff + auto-simplification
- Triggers: error responses, rate limits, timeouts, 5xx errors
This commit is contained in:
admin
2026-05-05 13:22:10 +00:00
Unverified
parent e658842458
commit 96da24cf86

View File

@@ -10,6 +10,7 @@ import { getRTK } from '../utils/rtk.js';
import { isDuplicate, markProcessed } from './deduplication.js';
import { queueRequest, clearQueue, isProcessing } from './request-queue.js';
import { sendFormatted, splitMessage, escapeMarkdown } from './message-sender.js';
import { withSelfCorrection } from './self-correction.js';
function buildSessionKey(chatId, threadId) {
  // Topic threads get a composite "<chatId>:<threadId>" key; plain chats
  // (no/falsy threadId) key on the chat id alone.
  if (threadId) {
    return `${chatId}:${threadId}`;
  }
  return String(chatId);
}
@@ -314,7 +315,10 @@ export async function initBot(config, api, tools, skills, agents) {
'📚 *Skills:* ' + svc.skills.length + ' loaded',
'🤖 *Agents:* ' + svc.agents.length + ' available',
'',
'📋 *Commands:* /tools /skills /agents /model /stats /voice /mcp /memory /cron /cancel', '🔄 *Self-Correction*: 2 retries + auto-simplification',
'⚡ *Streaming*: Real-time text delivery',
'',
'📋 *Commands:* /tools /skills /agents /model /stats /voice /mcp /memory /cron /cancel /selfcorrection',
'',
'Or just chat — I will use tools when needed.',
`Model: \`${svc.config?.api?.models?.default || 'glm-5.1'}\``,
@@ -349,6 +353,10 @@ export async function initBot(config, api, tools, skills, agents) {
}
await sendStreamingMessage(ctx, lines.join('\n'));
});
bot.command('selfcorrection', async (ctx) => {
await sendStreamingMessage(ctx, `🔄 *Self-Correction Loops* — FULLY ENABLED\n\nzCode CLI X now uses automatic self-correction:\n\n• **Max Retries**: 2 attempts\n• **Retry Delay**: 500ms → 1s → 1.5s (exponential backoff)\n• **Triggers**: \n - ❌ Error responses\n - Rate limits\n - Timeouts\n - 5xx server errors\n\n• **Auto-Simplification**: On retry, prompts are simplified to avoid recurring errors\n\n• **Logging**: All retries are logged with retry count and reason\n\nThis ensures robust responses even when the AI initially fails.`);
});
bot.command('model', async (ctx) => {
const text = ctx.match?.trim();
@@ -427,7 +435,7 @@ export async function initBot(config, api, tools, skills, agents) {
});
}
// ── Message text handler (with dedup + queue) ── // ── Message text handler (with dedup + queue + self-correction) ──
bot.on('message:text', async (ctx) => {
if (isDuplicate(ctx.message.message_id)) return;
markProcessed(ctx.message.message_id);
@@ -439,7 +447,13 @@ export async function initBot(config, api, tools, skills, agents) {
await queueRequest(key, text, async () => {
await ctx.api.sendChatAction(ctx.chat.id, 'typing');
const result = await chatWithAI([
// Wrap chatWithAI with self-correction
const chatWithCorrection = withSelfCorrection(async (msgs) => {
return await chatWithAI(msgs, {});
});
const result = await chatWithCorrection([
{ role: 'system', content: buildSystemPrompt(svc) },
{ role: 'user', content: text },
]);