feat: Add intelligent auto-router and enhanced integrations

- Add intelligent-router.sh hook for automatic agent routing
- Add AUTO-TRIGGER-SUMMARY.md documentation
- Add FINAL-INTEGRATION-SUMMARY.md documentation
- Complete Prometheus integration (6 commands + 4 tools)
- Complete Dexto integration (12 commands + 5 tools)
- Enhanced Ralph with access to all agents
- Fix /clawd command (removed disable-model-invocation)
- Update hooks.json to v5 with intelligent routing
- 291 total skills now available
- All 21 commands with automatic routing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
admin
2026-01-28 00:27:56 +04:00
Unverified
parent 3b128ba3bd
commit b52318eeae
1724 changed files with 351216 additions and 0 deletions

View File

@@ -0,0 +1,157 @@
# Request Logger Agent Configuration
# Demonstrates custom plugin integration with complete lifecycle testing
# Logs all user requests, tool calls, and assistant responses to ~/.dexto/logs/request-logger.log
# NOTE(review): indentation was lost in this copy; nesting below is restored per
# YAML convention — confirm key placement (especially `memories`) against the
# dexto agent config schema.

# MCP Servers - basic filesystem and browser tools
mcpServers:
  filesystem:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-filesystem"
      - .
  playwright:
    type: stdio
    command: npx
    args:
      - -y
      - "@playwright/mcp@latest"

# System prompt configuration
systemPrompt:
  contributors:
    - id: primary
      type: static
      priority: 0
      content: |
        You are a helpful AI assistant with comprehensive logging enabled.
        All your interactions (requests, tool calls, and responses) are being logged to help understand your behavior.
        Use tools when appropriate to answer user queries. You can use multiple tools in sequence to solve complex problems.
        After each tool result, determine if you need more information or can provide a final answer.
    - id: date
      type: dynamic
      priority: 10
      source: date
      enabled: true

# Memory configuration - controls how memories are included in system prompt
memories:
  enabled: true
  priority: 40
  includeTimestamps: false
  includeTags: true
  limit: 10
  pinnedOnly: false

# Optional greeting shown at chat start
greeting: "Hi! I'm the Logger Agent — all interactions are being logged for analysis. How can I help?"

# LLM configuration
llm:
  provider: openai
  model: gpt-5-mini
  apiKey: $OPENAI_API_KEY # presumably substituted from the environment at load time — confirm

# Storage configuration
storage:
  cache:
    type: in-memory
  database:
    type: sqlite
  blob:
    type: local # CLI provides storePath automatically
    maxBlobSize: 52428800 # 50 MiB per blob
    maxTotalSize: 1073741824 # 1 GiB total
    cleanupAfterDays: 30

# Tool confirmation settings
toolConfirmation:
  mode: manual
  # timeout: omitted = infinite wait
  allowedToolsStorage: memory

# Elicitation configuration - required for ask_user tool
elicitation:
  enabled: true
  # timeout: omitted = infinite wait

# Internal tools
internalTools:
  - ask_user

# Internal resources configuration
internalResources:
  enabled: true
  resources:
    - type: filesystem
      paths: ["."]
      maxFiles: 50
      maxDepth: 3
      includeHidden: false
      includeExtensions: [".txt", ".md", ".json", ".yaml", ".yml", ".js", ".ts", ".py", ".html", ".css"]
    - type: blob

# Plugin system configuration
plugins:
  # Built-in plugins
  contentPolicy:
    priority: 10
    blocking: true
    maxInputChars: 50000
    redactEmails: true
    redactApiKeys: true
    enabled: true
  responseSanitizer:
    priority: 900
    blocking: false
    redactEmails: true
    redactApiKeys: true
    maxResponseLength: 100000
    enabled: true
  # Custom Request Logger Plugin
  custom:
    - name: request-logger
      module: "${{dexto.agent_dir}}/plugins/request-logger.ts"
      enabled: true
      blocking: false # Non-blocking - we just want to observe, not interfere
      priority: 5 # Run early to capture original data before other plugins modify it
      config: {} # Empty config uses defaults: ~/.dexto/logs/request-logger.log

# Prompts - shown as clickable buttons in WebUI
prompts:
  - type: inline
    id: simple-question
    title: "🤔 Ask a Simple Question"
    description: "Test basic request/response logging"
    prompt: "What is the capital of France?"
    category: learning
    priority: 9
    showInStarters: true
  - type: inline
    id: tool-usage
    title: "🔧 Use a Tool"
    description: "Test tool call and result logging"
    prompt: "List the files in the current directory"
    category: tools
    priority: 8
    showInStarters: true
  - type: inline
    id: multi-step
    title: "🎯 Multi-Step Task"
    description: "Test logging across multiple tool calls"
    prompt: "Create a new file called test.txt with the content 'Hello from Logger Agent' and then read it back to me"
    category: tools
    priority: 7
    showInStarters: true
  - type: inline
    id: check-logs
    title: "📋 Check the Logs"
    description: "View the request logger output"
    prompt: "Can you read the file at ~/.dexto/logs/request-logger.log and show me the last 50 lines?"
    category: tools
    priority: 6
    showInStarters: true

View File

@@ -0,0 +1,186 @@
import type {
DextoPlugin,
BeforeLLMRequestPayload,
BeforeResponsePayload,
BeforeToolCallPayload,
AfterToolResultPayload,
PluginResult,
PluginExecutionContext,
} from '@core/plugins/types.js';
import { promises as fs } from 'fs';
import { homedir } from 'os';
import { join } from 'path';
/**
 * Request Logger Plugin
 *
 * Logs all user requests and assistant responses to a file for debugging and analysis.
 * Demonstrates the complete plugin lifecycle including resource management.
 *
 * Features:
 * - Logs user input (text, images, files)
 * - Logs tool calls and results
 * - Logs assistant responses with token usage
 * - Proper resource cleanup on shutdown
 */
export class RequestLoggerPlugin implements DextoPlugin {
    // Absolute path of the log file; set during initialize().
    private logFilePath: string = '';
    // Open append-mode handle; null before initialize() and after cleanup().
    private logFileHandle: fs.FileHandle | null = null;
    // Counter used to correlate the log entries belonging to one request.
    private requestCount: number = 0;

    /**
     * Initialize plugin - create log directory and open log file.
     *
     * @param config Optional overrides: `logDir` (default `~/.dexto/logs`)
     *               and `logFileName` (default `request-logger.log`).
     */
    async initialize(config: Record<string, any>): Promise<void> {
        // Default log path: ~/.dexto/logs/request-logger.log
        const logDir = config.logDir || join(homedir(), '.dexto', 'logs');
        const logFileName = config.logFileName || 'request-logger.log';
        this.logFilePath = join(logDir, logFileName);

        // Ensure log directory exists before opening the file.
        await fs.mkdir(logDir, { recursive: true });

        // Open in append mode so restarts never clobber earlier entries.
        this.logFileHandle = await fs.open(this.logFilePath, 'a');

        // Write initialization header
        await this.writeLog('='.repeat(80));
        await this.writeLog(`Request Logger initialized at ${new Date().toISOString()}`);
        await this.writeLog(`Log file: ${this.logFilePath}`);
        await this.writeLog('='.repeat(80));
    }

    /**
     * Log user input before it's sent to the LLM.
     */
    async beforeLLMRequest(
        payload: BeforeLLMRequestPayload,
        context: PluginExecutionContext
    ): Promise<PluginResult> {
        this.requestCount++;
        await this.writeLog('');
        await this.writeLog(`[${this.requestCount}] USER REQUEST at ${new Date().toISOString()}`);
        await this.writeLog(`Session: ${payload.sessionId || 'unknown'}`);
        await this.writeLog(`User: ${context.userId || 'anonymous'}`);
        await this.writeLog(`Model: ${context.llmConfig.provider}/${context.llmConfig.model}`);
        await this.writeLog('-'.repeat(40));
        await this.writeLog(`Text: ${payload.text}`);
        if (payload.imageData) {
            // Image content is base64/encoded text; log only size, not bytes.
            await this.writeLog(
                `Image: ${payload.imageData.mimeType} (${payload.imageData.image.length} chars)`
            );
        }
        if (payload.fileData) {
            await this.writeLog(
                `File: ${payload.fileData.filename || 'unknown'} (${payload.fileData.mimeType})`
            );
        }
        await this.writeLog('-'.repeat(40));
        return { ok: true };
    }

    /**
     * Log tool calls before execution.
     */
    async beforeToolCall(
        payload: BeforeToolCallPayload,
        context: PluginExecutionContext
    ): Promise<PluginResult> {
        await this.writeLog('');
        await this.writeLog(`[${this.requestCount}] TOOL CALL at ${new Date().toISOString()}`);
        await this.writeLog(`Tool: ${payload.toolName}`);
        await this.writeLog(`Call ID: ${payload.callId || 'unknown'}`);
        // formatValue() copes with args that JSON.stringify cannot serialize.
        await this.writeLog(`Arguments: ${this.formatValue(payload.args)}`);
        return { ok: true };
    }

    /**
     * Log tool results after execution.
     */
    async afterToolResult(
        payload: AfterToolResultPayload,
        context: PluginExecutionContext
    ): Promise<PluginResult> {
        await this.writeLog('');
        await this.writeLog(`[${this.requestCount}] TOOL RESULT at ${new Date().toISOString()}`);
        await this.writeLog(`Tool: ${payload.toolName}`);
        await this.writeLog(`Call ID: ${payload.callId || 'unknown'}`);
        await this.writeLog(`Success: ${payload.success}`);
        // BUG FIX: the previous code called
        // JSON.stringify(payload.result, null, 2).substring(0, 500), which
        // throws a TypeError when the result is undefined (JSON.stringify
        // returns undefined for undefined/function/symbol) and also throws on
        // circular structures. formatValue() handles both safely.
        await this.writeLog(`Result: ${this.formatValue(payload.result)}`);
        return { ok: true };
    }

    /**
     * Log assistant response before it's sent to the user.
     */
    async beforeResponse(
        payload: BeforeResponsePayload,
        context: PluginExecutionContext
    ): Promise<PluginResult> {
        await this.writeLog('');
        await this.writeLog(
            `[${this.requestCount}] ASSISTANT RESPONSE at ${new Date().toISOString()}`
        );
        await this.writeLog(`Session: ${payload.sessionId || 'unknown'}`);
        await this.writeLog(`Model: ${payload.provider}/${payload.model || 'unknown'}`);
        if (payload.tokenUsage) {
            await this.writeLog(
                `Tokens: ${payload.tokenUsage.input} input, ${payload.tokenUsage.output} output`
            );
        }
        await this.writeLog('-'.repeat(40));
        await this.writeLog(`Content: ${payload.content}`);
        if (payload.reasoning) {
            await this.writeLog('-'.repeat(40));
            await this.writeLog(`Reasoning: ${payload.reasoning}`);
        }
        await this.writeLog('-'.repeat(40));
        return { ok: true };
    }

    /**
     * Cleanup - write a shutdown footer, then close the log file handle.
     */
    async cleanup(): Promise<void> {
        await this.writeLog('');
        await this.writeLog('='.repeat(80));
        await this.writeLog(`Request Logger shutting down at ${new Date().toISOString()}`);
        await this.writeLog(`Total requests logged: ${this.requestCount}`);
        await this.writeLog('='.repeat(80));
        if (this.logFileHandle) {
            await this.logFileHandle.close();
            this.logFileHandle = null;
        }
    }

    /**
     * Render an arbitrary value for the log, truncated to `limit` characters.
     *
     * Unlike a bare JSON.stringify(...).substring(...), this never throws:
     * - undefined/function/symbol (where JSON.stringify returns undefined)
     *   fall back to String(value);
     * - circular structures (where JSON.stringify throws) are caught and
     *   rendered best-effort via String(value).
     * Truncated output is always suffixed with '...'.
     */
    private formatValue(value: unknown, limit: number = 500): string {
        let text: string;
        if (typeof value === 'string') {
            text = value;
        } else {
            try {
                text = JSON.stringify(value, null, 2) ?? String(value);
            } catch {
                // e.g. circular structure — best-effort fallback
                text = String(value);
            }
        }
        return text.length > limit ? text.substring(0, limit) + '...' : text;
    }

    /**
     * Helper method to write one line to the log file.
     * Silently drops messages when no handle is open (before initialize()
     * or after cleanup()) — intentional: logging must never crash the agent.
     */
    private async writeLog(message: string): Promise<void> {
        if (this.logFileHandle) {
            await this.logFileHandle.write(message + '\n');
        }
    }
}

// Export the plugin class directly for the plugin manager to instantiate
export default RequestLoggerPlugin;