feat: Add intelligent auto-router and enhanced integrations

- Add intelligent-router.sh hook for automatic agent routing
- Add AUTO-TRIGGER-SUMMARY.md documentation
- Add FINAL-INTEGRATION-SUMMARY.md documentation
- Complete Prometheus integration (6 commands + 4 tools)
- Complete Dexto integration (12 commands + 5 tools)
- Enhanced Ralph with access to all agents
- Fix /clawd command (removed disable-model-invocation)
- Update hooks.json to v5 with intelligent routing
- 291 total skills now available
- All 21 commands with automatic routing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
admin
2026-01-28 00:27:56 +04:00
Unverified
parent 3b128ba3bd
commit b52318eeae
1724 changed files with 351216 additions and 0 deletions

View File

@@ -0,0 +1,142 @@
#!/usr/bin/env tsx
/**
* Clean build artifacts, temporary files, and caches across the monorepo
*/
import fs from 'fs-extra';
import path from 'path';
import { fileURLToPath } from 'url';
// Resolve the repo root relative to this script's location (scripts/ -> repo root).
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const rootDir = path.resolve(__dirname, '..');
// Directories to clean (relative to root and packages)
const CLEAN_DIRS = [
    // Build outputs
    'dist',
    'build',
    '.next',
    '.turbo',
    // Cache directories
    '.eslintcache',
    '.tsbuildinfo',
    'tsconfig.tsbuildinfo',
    // Test artifacts
    'coverage',
    '.nyc_output',
    'test-temp',
    // Logs
    'logs',
    // NOTE(review): cleanDirectory checks this as a literal path (fs.pathExists
    // does no glob expansion), so '*.log' matches nothing in practice; .log
    // files are only swept at the repo root via CLEAN_EXTENSIONS — confirm
    // whether per-package glob cleanup was intended here.
    '*.log',
];
// Files to clean by extension (applied to root-level files only; see cleanRoot)
const CLEAN_EXTENSIONS = [
    '.tsbuildinfo',
    '.log',
    '.tgz', // Remove any leftover tarballs
];
// Directories to never delete (safety)
// NOTE(review): verify the deletion helper actually consults this list before
// relying on it as a safety net.
const PROTECTED_DIRS = [
    '.git',
    '.github',
    'node_modules', // Let pnpm handle these
];
async function cleanDirectory(dir: string, targetName: string): Promise<void> {
const targetPath = path.join(dir, targetName);
if (await fs.pathExists(targetPath)) {
try {
await fs.remove(targetPath);
console.log(`✅ Removed: ${path.relative(rootDir, targetPath)}`);
} catch (err) {
console.error(`⚠️ Failed to remove: ${path.relative(rootDir, targetPath)}`, err);
}
}
}
/**
 * Clean every package under packages/: removes each CLEAN_DIRS entry from
 * every package directory. Non-directory entries are skipped.
 */
async function cleanPackages(): Promise<void> {
    const packagesDir = path.join(rootDir, 'packages');
    const hasPackages = await fs.pathExists(packagesDir);
    if (!hasPackages) {
        console.log('⚠️ No packages directory found');
        return;
    }
    const entries = await fs.readdir(packagesDir);
    for (const entry of entries) {
        const entryPath = path.join(packagesDir, entry);
        const info = await fs.stat(entryPath);
        if (!info.isDirectory()) {
            continue;
        }
        console.log(`\n📦 Cleaning package: ${entry}`);
        // Sweep every known build/cache/test artifact inside this package
        for (const name of CLEAN_DIRS) {
            await cleanDirectory(entryPath, name);
        }
    }
}
/**
 * Clean the repository root: CLEAN_DIRS entries first, then any top-level
 * file whose extension appears in CLEAN_EXTENSIONS.
 */
async function cleanRoot(): Promise<void> {
    console.log('\n🏠 Cleaning root directory');
    // Well-known directories / file names
    for (const name of CLEAN_DIRS) {
        await cleanDirectory(rootDir, name);
    }
    // Then sweep loose root-level files by extension
    const entries = await fs.readdir(rootDir);
    const doomed = entries.filter((entry) =>
        CLEAN_EXTENSIONS.some((ext) => entry.endsWith(ext))
    );
    for (const entry of doomed) {
        try {
            await fs.remove(path.join(rootDir, entry));
            console.log(`✅ Removed: ${entry}`);
        } catch (err) {
            console.error(`⚠️ Failed to remove: ${entry}`, err);
        }
    }
}
/**
 * Entry point: clean every package, then the repo root.
 * Conversation storage (.dexto) is intentionally left untouched; use
 * `pnpm clean:storage` to wipe it explicitly.
 */
async function main(): Promise<void> {
    console.log('🧹 Starting comprehensive cleanup...\n');
    console.log('This will remove:');
    console.log('  • Package dist and build directories');
    console.log('  • Next.js .next directories');
    console.log('  • TypeScript build info files');
    console.log('  • Test coverage reports');
    console.log('  • Logs and cache files');
    console.log('  • Leftover tarballs\n');
    try {
        await cleanPackages();
        await cleanRoot();
        // cleanStorage() is deliberately NOT called, to preserve history.
        console.log('\n✨ Cleanup completed successfully!');
        console.log('Run "pnpm install" if you need to reinstall dependencies.');
        console.log('Note: .dexto storage was preserved. Use "pnpm clean:storage" to wipe it.');
    } catch (error) {
        console.error('\n❌ Cleanup failed:', error);
        process.exit(1);
    }
}

// Kick off the cleanup; an unhandled rejection is fatal.
main().catch((error) => {
    console.error('Fatal error:', error);
    process.exit(1);
});

View File

@@ -0,0 +1,46 @@
// Script to copy built Vite webui files to CLI dist
import fs from 'fs-extra';
import path from 'path';
import { fileURLToPath } from 'url';
// Get the directory name of the current module (ESM has no __dirname)
// Type annotations removed: these are all trivially inferred as string.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const rootDir = path.resolve(__dirname, '..');
// Define source and target paths
// Vite outputs to packages/webui/dist
const sourceWebUIDir = path.join(rootDir, 'packages', 'webui', 'dist');
// Copy into CLI's dist folder for embedding
const targetDir = path.join(rootDir, 'packages', 'cli', 'dist', 'webui');
/**
 * Copy the built Vite WebUI bundle into the CLI's dist folder so it can be
 * embedded in the published CLI package. Exits with code 1 on any failure.
 */
async function copyWebUIBuild(): Promise<void> {
    try {
        // The WebUI must have been built already (pnpm build:webui)
        const hasSource = fs.existsSync(sourceWebUIDir);
        if (!hasSource) {
            console.log('⚠️ WebUI dist not found. Run "pnpm build:webui" first.');
            console.log(`   Expected path: ${sourceWebUIDir}`);
            process.exit(1);
        }
        // Replace any stale copy wholesale rather than merging into it
        if (fs.existsSync(targetDir)) {
            console.log('Removing existing target directory...');
            await fs.remove(targetDir);
        }
        console.log(`Copying built webui from ${sourceWebUIDir} to ${targetDir}...`);
        // Copy the entire Vite dist folder
        await fs.copy(sourceWebUIDir, targetDir);
        console.log('✅ Successfully copied built webui to dist');
        console.log(`   Source: ${sourceWebUIDir}`);
        console.log(`   Target: ${targetDir}`);
    } catch (error: unknown) {
        console.error('❌ Error copying built webui:', error);
        process.exit(1);
    }
}

// Run immediately when executed
copyWebUIBuild();

192
dexto/scripts/dev-server.ts Normal file
View File

@@ -0,0 +1,192 @@
#!/usr/bin/env tsx
/**
* Development server that:
* 1. Builds all packages (turbo handles dependency graph)
* 2. Runs the CLI directly from dist/index.js in server mode (API on port 3001 by default)
* 3. Starts Vite dev server for WebUI with hot reload (port 3000)
* 4. Opens browser automatically when WebUI is ready
*
* Vite proxies /api/* requests to the API server (configured in vite.config.ts)
*
* Usage:
* pnpm dev # Use default agent on port 3001
* pnpm dev -- --agent examples/resources-demo-server/agent.yml
* pnpm dev -- --port 6767 # Custom API port
* pnpm dev -- --agent my-agent.yml --port 6767
*/
import { execSync, spawn, ChildProcess } from 'child_process';
import { join } from 'path';
import open from 'open';
const rootDir = process.cwd();
const cliPath = join(rootDir, 'packages/cli/dist/index.js');

// Child process handles and one-shot flags shared across this script
let apiProcess: ChildProcess | null = null;
let webuiProcess: ChildProcess | null = null;
let browserOpened = false;
let webuiStarted = false;

// Parse command-line arguments
const args = process.argv.slice(2);
// Return the token following `flag`, or null when absent / value missing
function valueAfter(flag: string): string | null {
    const at = args.indexOf(flag);
    return at !== -1 && at + 1 < args.length ? args[at + 1] : null;
}
const agentPath = valueAfter('--agent');
const rawPort = valueAfter('--port') ?? undefined;
// Fall back to 3001 when --port is absent or blank
const apiPort = rawPort && rawPort.trim() !== '' ? rawPort : '3001';
const apiPortNum = Number.parseInt(apiPort, 10);
if (Number.isNaN(apiPortNum)) {
    console.error(`❌ Invalid port: ${apiPort}`);
    process.exit(1);
}
// WebUI port is API port - 1 (so API 3001 → WebUI 3000, API 6767 → WebUI 6766)
const webuiPort = String(apiPortNum - 1);
/** Terminate both child servers (if running) and exit the process. */
function cleanup() {
    console.log('\n🛑 Shutting down servers...');
    for (const child of [apiProcess, webuiProcess]) {
        child?.kill('SIGTERM');
    }
    process.exit(0);
}
// Handle exit signals so child processes are reaped on Ctrl+C / kill
process.on('SIGINT', cleanup);
process.on('SIGTERM', cleanup);
console.log('🔨 Building packages...\n');
try {
    // Build all packages (turbo handles dependency graph)
    // This ensures webui dependencies like client-sdk are built
    execSync('pnpm run build:packages', {
        stdio: 'inherit',
        cwd: rootDir,
    });
    console.log('✅ Build complete!\n');
} catch (err) {
    // A broken build makes both dev servers useless — abort immediately.
    console.error('❌ Build failed:', err);
    process.exit(1);
}
console.log('🚀 Starting development servers...\n');
// Start API server directly from dist.
// stdout/stderr are piped (not inherited) so output can be prefixed with
// [API] and watched for the readiness message below.
const cliArgs = [cliPath, '--mode', 'server', '--port', apiPort];
if (agentPath) {
    console.log(`📡 Starting API server on port ${apiPort} with agent: ${agentPath}...`);
    cliArgs.push('--agent', agentPath);
} else {
    console.log(`📡 Starting API server on port ${apiPort}...`);
}
apiProcess = spawn('node', cliArgs, {
    stdio: ['inherit', 'pipe', 'pipe'],
    cwd: rootDir,
    env: {
        ...process.env,
        PORT: apiPort,
        DEXTO_DEV_MODE: 'true', // Force use of repo config for development
    },
});
// Function to start WebUI (called when API is ready).
// Spawns Vite, prefixes its output with [UI], and opens the browser the
// first time Vite reports its "Local:" URL. Idempotent via webuiStarted.
function startWebUI() {
    if (webuiStarted) return;
    webuiStarted = true;
    console.log('\n🎨 Starting WebUI dev server...');
    webuiProcess = spawn('pnpm', ['exec', 'vite', '--port', webuiPort], {
        cwd: join(rootDir, 'packages', 'webui'),
        stdio: ['inherit', 'pipe', 'pipe'],
        env: {
            ...process.env,
            DEXTO_API_PORT: apiPort,
        },
    });
    // Prefix WebUI output and detect when ready
    if (webuiProcess.stdout) {
        webuiProcess.stdout.on('data', (data) => {
            const lines = data.toString().split('\n').filter(Boolean);
            lines.forEach((line: string) => {
                console.log(`[UI] ${line}`);
                // Open browser when Vite is ready (looks for "Local:" message)
                if (!browserOpened && line.includes('Local:')) {
                    browserOpened = true;
                    // Extract URL from Vite output (e.g., "Local: http://localhost:3001/")
                    const urlMatch = line.match(/http:\/\/localhost:\d+/);
                    const webUrl = urlMatch ? urlMatch[0] : `http://localhost:${webuiPort}`;
                    console.log(`\n🌐 Opening browser at ${webUrl}...`);
                    // Best-effort: failing to open a browser is not fatal
                    open(webUrl, { wait: false }).catch((err) => {
                        console.log(`   Could not open browser automatically: ${err.message}`);
                        console.log(`   Please open ${webUrl} manually`);
                    });
                }
            });
        });
    }
    // Prefix WebUI stderr too
    if (webuiProcess.stderr) {
        webuiProcess.stderr.on('data', (data) => {
            const lines = data.toString().split('\n').filter(Boolean);
            lines.forEach((line: string) => {
                console.error(`[UI] ${line}`);
            });
        });
    }
    // Spawn failure (e.g. pnpm missing) tears everything down
    webuiProcess.on('error', (err) => {
        console.error('❌ Failed to start WebUI dev server:', err);
        cleanup();
    });
    console.log('\n✨ Development servers ready!');
    console.log(`   API:   http://localhost:${apiPort} (from dist build)`);
    console.log('   WebUI: Starting... (see Vite output for URL)');
    console.log('\nPress Ctrl+C to stop all servers\n');
}
// Prefix API output. The stdout watcher doubles as the readiness probe:
// the WebUI is only started once the backend prints "Server running at".
if (apiProcess.stdout) {
    apiProcess.stdout.on('data', (data) => {
        const lines = data.toString().split('\n').filter(Boolean);
        lines.forEach((line: string) => {
            console.log(`[API] ${line}`);
            // Start WebUI when API server is ready
            if (!webuiStarted && line.includes('Server running at')) {
                startWebUI();
            }
        });
    });
}
if (apiProcess.stderr) {
    apiProcess.stderr.on('data', (data) => {
        const lines = data.toString().split('\n').filter(Boolean);
        lines.forEach((line: string) => {
            console.error(`[API] ${line}`);
        });
    });
}
// Spawn failure (e.g. dist not built) tears everything down
apiProcess.on('error', (err) => {
    console.error('❌ Failed to start API server:', err);
    cleanup();
});
// Fallback: Start WebUI after 60 seconds if API ready signal not detected
// (e.g. if the readiness log line ever changes wording).
setTimeout(() => {
    if (!webuiStarted) {
        console.log('\n⚠ API ready signal not detected after 60s, starting WebUI anyway...');
        startWebUI();
    }
}, 60000);

View File

@@ -0,0 +1,323 @@
#!/bin/bash
# Extract review comments from GitHub PR with combinable filters
# Usage: ./extract-review-comments.sh OWNER/REPO PR_NUMBER [--reviewer LOGIN_ID] [FLAGS]

# Fail fast: abort on any error, including failures inside pipelines.
set -e
set -o pipefail

# Check dependencies early (both gh and jq are required throughout)
if ! command -v gh >/dev/null 2>&1; then
    echo "❌ Error: GitHub CLI (gh) is required but not installed" >&2
    exit 1
fi
if ! command -v jq >/dev/null 2>&1; then
    echo "❌ Error: jq is required but not installed" >&2
    exit 1
fi
# Print CLI usage, option reference, and worked examples.
show_usage() {
    cat <<EOF
Usage: $0 OWNER/REPO PR_NUMBER [--reviewer LOGIN_ID] [FLAGS]

OPTIONS:
  --reviewer LOGIN_ID    Filter comments by specific reviewer (e.g., coderabbitai[bot], rahulkarajgikar)
  --limit NUMBER         Maximum number of comments to display (default: all)
  --offset NUMBER        Number of comments to skip (default: 0, for pagination)

FLAGS (can be combined):
  --latest-only          Latest review by timestamp
  --latest-actionable    Latest review with substantial feedback (has top-level summary)
  --unresolved-only      Only unresolved comments

Examples:
  $0 truffle-ai/dexto 293                                                  # All comments from all reviewers
  $0 truffle-ai/dexto 293 --reviewer coderabbitai[bot]                     # All CodeRabbit comments
  $0 truffle-ai/dexto 293 --reviewer rahulkarajgikar --latest-actionable   # Latest actionable human review
  $0 truffle-ai/dexto 293 --reviewer coderabbitai[bot] --unresolved-only   # Unresolved CodeRabbit comments
  $0 truffle-ai/dexto 293 --latest-actionable --unresolved-only            # Unresolved from any latest actionable review

Pagination examples:
  $0 truffle-ai/dexto 293 --unresolved-only --limit 10                     # First 10 comments
  $0 truffle-ai/dexto 293 --unresolved-only --limit 10 --offset 10         # Next 10 comments (page 2)
  $0 truffle-ai/dexto 293 --unresolved-only --limit 5 --offset 20          # Comments 21-25
EOF
}
# Positional arguments OWNER/REPO and PR_NUMBER are mandatory.
if [ $# -lt 2 ]; then
    show_usage
    exit 1
fi
REPO="$1"
PR_NUMBER="$2"
shift 2 # Remove first two args, leaving only flags

# Parse flags — defaults: booleans off, no reviewer filter, no limit, offset 0.
LATEST_ONLY=false
LATEST_ACTIONABLE=false
UNRESOLVED_ONLY=false
REVIEWER=""
LIMIT=""
OFFSET="0"
# Option-parsing loop. Each case validates its argument before consuming it;
# note that this loop consumes ALL of "$@" via shift — nothing of the original
# argument list remains afterwards.
while [[ $# -gt 0 ]]; do
    case $1 in
        --reviewer)
            # Reject a missing value or one that looks like another flag
            if [ -z "$2" ] || [[ "$2" == --* || "$2" == -* ]]; then
                echo "❌ Error: --reviewer requires a login ID"
                exit 1
            fi
            REVIEWER="$2"
            shift 2
            ;;
        --latest-only)
            LATEST_ONLY=true
            shift
            ;;
        --latest-actionable)
            LATEST_ACTIONABLE=true
            shift
            ;;
        --unresolved-only)
            UNRESOLVED_ONLY=true
            shift
            ;;
        --limit)
            if [ -z "$2" ]; then
                echo "❌ Error: --limit requires a number"
                exit 1
            fi
            # Must be a strictly positive integer
            if ! [[ "$2" =~ ^[0-9]+$ ]] || [ "$2" -le 0 ]; then
                echo "❌ Error: --limit must be a positive integer"
                exit 1
            fi
            LIMIT="$2"
            shift 2
            ;;
        --offset)
            if [ -z "$2" ]; then
                echo "❌ Error: --offset requires a number"
                exit 1
            fi
            # Zero is allowed (start of the list)
            if ! [[ "$2" =~ ^[0-9]+$ ]]; then
                echo "❌ Error: --offset must be a non-negative integer"
                exit 1
            fi
            OFFSET="$2"
            shift 2
            ;;
        --help|-h)
            show_usage
            exit 0
            ;;
        *)
            echo "Unknown flag: $1"
            show_usage
            exit 1
            ;;
    esac
done
# Validate conflicting flags: the two "latest" scopes are mutually exclusive
if [ "$LATEST_ONLY" = true ] && [ "$LATEST_ACTIONABLE" = true ]; then
    echo "❌ Error: Cannot use both --latest-only and --latest-actionable"
    exit 1
fi
# Extract owner and repo name
IFS='/' read -r OWNER REPO_NAME <<< "$REPO"
# Build display text based on reviewer filter
if [ -n "$REVIEWER" ]; then
echo "🤖 Extracting $REVIEWER comments from $REPO PR #$PR_NUMBER"
BASE_DESC="$REVIEWER comments"
else
echo "🤖 Extracting review comments from $REPO PR #$PR_NUMBER"
BASE_DESC="review comments"
fi
# Build mode description
MODE_DESC="All $BASE_DESC"
if [ "$LATEST_ONLY" = true ]; then
MODE_DESC="Latest review (by timestamp)"
elif [ "$LATEST_ACTIONABLE" = true ]; then
MODE_DESC="Latest actionable review"
fi
if [ "$UNRESOLVED_ONLY" = true ]; then
if [ "$LATEST_ONLY" = true ] || [ "$LATEST_ACTIONABLE" = true ]; then
MODE_DESC="$MODE_DESC - unresolved only"
else
MODE_DESC="All unresolved $BASE_DESC"
fi
fi
# Add pagination info to mode description
if [ -n "$LIMIT" ]; then
if [ "$OFFSET" != "0" ]; then
MODE_DESC="$MODE_DESC (showing $LIMIT comments starting from #$((OFFSET + 1)))"
else
MODE_DESC="$MODE_DESC (showing first $LIMIT comments)"
fi
fi
echo "📋 Mode: $MODE_DESC"
echo "=================================================================="
# Step 1: Determine the scope (which review(s) to look at).
# `.submitted_at // .created_at // .id` sorts by submission time with fallbacks,
# so reviews missing timestamps still order deterministically.
TARGET_REVIEW_ID=""
if [ "$LATEST_ONLY" = true ]; then
    # Get the most recent review by timestamp
    if [ -n "$REVIEWER" ]; then
        # Filter by specific reviewer, then sort to most recent
        TARGET_REVIEW_ID=$(gh api "repos/$REPO/pulls/$PR_NUMBER/reviews" \
            | jq -r --arg reviewer "$REVIEWER" '[.[] | select(.user.login == $reviewer)] | sort_by(.submitted_at // .created_at // .id) | last | .id')
        REVIEWER_DESC=" from $REVIEWER"
    else
        # Any reviewer, sort to most recent
        TARGET_REVIEW_ID=$(gh api "repos/$REPO/pulls/$PR_NUMBER/reviews" \
            | jq -r '[.[]] | sort_by(.submitted_at // .created_at // .id) | last | .id')
        REVIEWER_DESC=""
    fi
    # jq emits the literal string "null" when no review matched
    if [ -z "$TARGET_REVIEW_ID" ] || [ "$TARGET_REVIEW_ID" = "null" ]; then
        echo "❌ No reviews found${REVIEWER_DESC} for this PR"
        exit 1
    fi
    echo "🔍 Latest review ID: $TARGET_REVIEW_ID"
elif [ "$LATEST_ACTIONABLE" = true ]; then
    # Get the most recent review with a body (top-level summary = actionable review)
    if [ -n "$REVIEWER" ]; then
        # Filter by specific reviewer, then sort to most recent actionable
        TARGET_REVIEW_ID=$(gh api "repos/$REPO/pulls/$PR_NUMBER/reviews" \
            | jq -r --arg reviewer "$REVIEWER" '[.[] | select(.user.login == $reviewer and .body != null and .body != "")] | sort_by(.submitted_at // .created_at // .id) | last | .id')
        REVIEWER_DESC=" from $REVIEWER"
    else
        # Any reviewer, most recent actionable
        TARGET_REVIEW_ID=$(gh api "repos/$REPO/pulls/$PR_NUMBER/reviews" \
            | jq -r '[.[] | select(.body != null and .body != "")] | sort_by(.submitted_at // .created_at // .id) | last | .id')
        REVIEWER_DESC=""
    fi
    if [ -z "$TARGET_REVIEW_ID" ] || [ "$TARGET_REVIEW_ID" = "null" ]; then
        echo "❌ No actionable reviews found${REVIEWER_DESC} for this PR"
        exit 1
    fi
    echo "🔍 Latest actionable review ID: $TARGET_REVIEW_ID"
fi
# Step 2: Get the base set of comments based on scope.
# `gh api --paginate` emits one JSON array per page, so `jq -s` slurps them
# into an array-of-arrays and `.[] | .[]` flattens to individual comments.
if [ -n "$TARGET_REVIEW_ID" ]; then
    # Get comments from specific review (already filtered by reviewer if specified)
    BASE_COMMENTS=$(gh api "repos/$REPO/pulls/$PR_NUMBER/comments" --paginate \
        | jq -s --arg review_id "$TARGET_REVIEW_ID" '[ .[] | .[] | select(.pull_request_review_id == ($review_id | tonumber)) ]')
else
    # Get all comments, optionally filtered by reviewer
    if [ -n "$REVIEWER" ]; then
        BASE_COMMENTS=$(gh api "repos/$REPO/pulls/$PR_NUMBER/comments" --paginate \
            | jq -s --arg reviewer "$REVIEWER" '[ .[] | .[] | select(.user.login == $reviewer) ]')
    else
        BASE_COMMENTS=$(gh api "repos/$REPO/pulls/$PR_NUMBER/comments" --paginate \
            | jq -s '[ .[] | .[] | select(.user.login) ]') # All comments from any reviewer
    fi
fi
# Step 3: Apply unresolved filter if requested
if [ "$UNRESOLVED_ONLY" = true ]; then
    # We need to cross-reference with GraphQL data for resolution status —
    # the REST API does not expose review-thread resolution.
    echo "🔄 Checking resolution status..."
    # Get unresolved thread data from GraphQL.
    # NOTE(review): only the first 100 threads / 100 comments per thread are
    # fetched (no cursor pagination) — confirm this bound is acceptable for
    # very large PRs.
    UNRESOLVED_THREADS=$(gh api graphql -f query='
      query($owner: String!, $repo: String!, $number: Int!) {
        repository(owner: $owner, name: $repo) {
          pullRequest(number: $number) {
            reviewThreads(first: 100) {
              nodes {
                id
                isResolved
                comments(first: 100) {
                  nodes {
                    id
                    databaseId
                    author { login }
                  }
                }
              }
            }
          }
        }
      }' \
        -f owner="$OWNER" \
        -f repo="$REPO_NAME" \
        -F number="$PR_NUMBER" \
        | jq '[.data.repository.pullRequest.reviewThreads.nodes[] |
            select(.isResolved == false) |
            .comments.nodes[] | .databaseId]')
    # Filter BASE_COMMENTS to only include unresolved comment IDs
    # (REST comment .id equals GraphQL databaseId)
    FILTERED_COMMENTS=$(echo "$BASE_COMMENTS" | jq --argjson unresolved_ids "$UNRESOLVED_THREADS" '
        [.[] | select(.id as $id | $unresolved_ids | index($id))]')
else
    FILTERED_COMMENTS="$BASE_COMMENTS"
fi
# Step 4: Sort comments by file path and line number
# (.line // 0 keeps comments on outdated/deleted lines sortable)
SORTED_COMMENTS=$(echo "$FILTERED_COMMENTS" | jq 'sort_by([.path, .line // 0])')
# Step 5: Apply pagination if specified (jq array slice is end-exclusive)
if [ -n "$LIMIT" ]; then
    PAGINATED_COMMENTS=$(echo "$SORTED_COMMENTS" | jq --argjson limit "$LIMIT" --argjson offset "$OFFSET" '
        .[$offset:$offset + $limit]')
else
    PAGINATED_COMMENTS="$SORTED_COMMENTS"
fi
# Step 6: Count and display results
TOTAL_COUNT=$(echo "$SORTED_COMMENTS" | jq length)
DISPLAYED_COUNT=$(echo "$PAGINATED_COMMENTS" | jq length)
if [ "$TOTAL_COUNT" -eq 0 ]; then
    echo "📊 No comments found matching the specified criteria"
    echo ""
    echo "✅ Done! Use 'gh pr view $PR_NUMBER --repo $REPO --web' to view the PR in browser"
    exit 0
fi
# Display the comments with GitHub links
# (the #discussion_r<ID> anchor links straight to the comment on github.com)
echo "$PAGINATED_COMMENTS" | jq -r --arg repo "$REPO" --arg pr "$PR_NUMBER" '.[] |
    "📄 " + .path + ":" + (.line | tostring) + "\n" +
    "🆔 Comment ID: " + (.id | tostring) + "\n" +
    "🔗 GitHub Link: https://github.com/" + $repo + "/pull/" + $pr + "#discussion_r" + (.id | tostring) + "\n" +
    "📅 Created: " + .created_at + "\n" +
    "👍 Reactions: " + (.reactions.total_count | tostring) +
    (if .pull_request_review_id then "\n🔗 Review ID: " + (.pull_request_review_id | tostring) else "" end) +
    "\n---\n" + .body + "\n" +
    "=================================================================="
'
echo ""
if [ -n "$LIMIT" ]; then
    # Ceiling division for the page count; OFFSET/LIMIT gives the 0-based page
    TOTAL_PAGES=$(( (TOTAL_COUNT + LIMIT - 1) / LIMIT ))
    CURRENT_PAGE=$(( OFFSET / LIMIT + 1 ))
    echo "📊 Summary: Showing $DISPLAYED_COUNT of $TOTAL_COUNT total comments (Page $CURRENT_PAGE of $TOTAL_PAGES)"
    # BUG FIX: the original built the prev/next hint commands from "$@", but
    # the option-parsing loop consumed every argument via `shift`, so "$@" is
    # empty here and all flags were silently dropped from the hints.
    # Reconstruct the non-pagination flags from the parsed state instead.
    HINT_FLAGS=""
    [ -n "$REVIEWER" ] && HINT_FLAGS="$HINT_FLAGS --reviewer $REVIEWER"
    [ "$LATEST_ONLY" = true ] && HINT_FLAGS="$HINT_FLAGS --latest-only"
    [ "$LATEST_ACTIONABLE" = true ] && HINT_FLAGS="$HINT_FLAGS --latest-actionable"
    [ "$UNRESOLVED_ONLY" = true ] && HINT_FLAGS="$HINT_FLAGS --unresolved-only"
    HINT_FLAGS="$HINT_FLAGS --limit $LIMIT"
    # Show pagination hints
    if [ $CURRENT_PAGE -gt 1 ]; then
        PREV_OFFSET=$((OFFSET - LIMIT))
        if [ $PREV_OFFSET -lt 0 ]; then PREV_OFFSET=0; fi
        echo "⬅️ Previous page: $0 $REPO $PR_NUMBER$HINT_FLAGS --offset $PREV_OFFSET"
    fi
    if [ $CURRENT_PAGE -lt $TOTAL_PAGES ]; then
        NEXT_OFFSET=$((OFFSET + LIMIT))
        echo "➡️ Next page: $0 $REPO $PR_NUMBER$HINT_FLAGS --offset $NEXT_OFFSET"
    fi
else
    echo "📊 Summary: Found $TOTAL_COUNT comments matching criteria (sorted by file/line)"
fi
echo ""
echo "✅ Done! Use 'gh pr view $PR_NUMBER --repo $REPO --web' to view the PR in browser"

View File

@@ -0,0 +1,219 @@
#!/usr/bin/env tsx
/**
* Syncs OpenAPI specification from Hono server routes to docs
*
* Usage:
* pnpm run sync-openapi-docs # Update the docs file
* pnpm run sync-openapi-docs:check # Verify docs are up-to-date (CI)
*
* This script creates a mock agent and Hono app instance to extract
* the OpenAPI schema without needing a running server or real agent.
*/
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { execSync } from 'node:child_process';
// ESM equivalents of __filename/__dirname for the path resolution below
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// --check: verify the committed spec matches the generated one (CI mode)
const CHECK_MODE = process.argv.includes('--check');
// Where the docs site serves the spec from
const OUTPUT_PATH = path.join(__dirname, '../docs/static/openapi/openapi.json');
// Built Hono server entry, imported dynamically to construct the app
const SERVER_DIST_PATH = path.join(__dirname, '../packages/server/dist/hono/index.js');
/**
 * Generate the OpenAPI spec by instantiating the Hono app in-process with a
 * fully mocked agent (no running server, no real agent), then either write it
 * to OUTPUT_PATH (sync mode) or compare it against the committed file
 * (--check mode). Always exits the process explicitly: 0 on success or
 * up-to-date, 1 on any failure.
 */
async function syncOpenAPISpec() {
    try {
        if (CHECK_MODE) {
            console.log('🔍 Checking if OpenAPI docs are up-to-date...\n');
        } else {
            console.log('📝 Syncing OpenAPI specification to docs...\n');
        }
        // Build server package if not built or check mode
        if (!fs.existsSync(SERVER_DIST_PATH)) {
            console.log('📦 Server package not built, building now...\n');
            try {
                execSync('pnpm --filter @dexto/server... build', {
                    stdio: 'inherit',
                    cwd: path.join(__dirname, '..'),
                });
                console.log('✓ Server package built successfully\n');
            } catch (err) {
                // Build output was already streamed via stdio: 'inherit',
                // so the original error detail is intentionally dropped here.
                throw new Error(
                    'Failed to build server package. Please fix build errors and try again.'
                );
            }
        }
        // Import server package (dynamic: the dist file may have just been built)
        let createDextoApp: any;
        try {
            const serverModule = await import(SERVER_DIST_PATH);
            createDextoApp = serverModule.createDextoApp;
            if (!createDextoApp) {
                throw new Error('createDextoApp not exported from server package');
            }
        } catch (err) {
            if (err instanceof Error && err.message.includes('Cannot find module')) {
                throw new Error('Failed to import server package. Run: pnpm run build:server');
            }
            throw err;
        }
        // Create mock event bus (WebhookSubscriber needs this)
        const mockEventBus = {
            on: () => {},
            off: () => {},
            emit: () => {},
            once: () => {},
            removeAllListeners: () => {},
        };
        // Create mock agent using Proxy - handles all method calls gracefully:
        // any property other than the two special-cased ones resolves to a
        // no-op function returning Promise<null>.
        const mockAgent: any = new Proxy(
            { agentEventBus: mockEventBus },
            {
                get: (target, prop) => {
                    if (prop === 'agentEventBus') return mockEventBus;
                    if (prop === 'getCard') {
                        return () => ({
                            name: 'Dexto',
                            description: 'AI Agent Framework',
                            version: '1.0.0',
                        });
                    }
                    return () => Promise.resolve(null);
                },
            }
        );
        // Create mock agents context for agent management routes
        const mockAgentsContext = {
            switchAgentById: async (agentId: string) => ({ id: agentId, name: agentId }),
            switchAgentByPath: async (filePath: string) => ({
                id: 'custom',
                name: filePath,
            }),
            resolveAgentInfo: async (agentId: string) => ({ id: agentId, name: agentId }),
            ensureAgentAvailable: () => {},
            getActiveAgentId: () => 'default',
        };
        // Create Hono app with mock agent and agents context
        const app = createDextoApp({
            getAgent: () => mockAgent,
            getAgentCard: () => mockAgent.getCard(),
            agentsContext: mockAgentsContext,
        });
        // Fetch OpenAPI spec via app.fetch (no server needed!)
        const req = new globalThis.Request('http://localhost/openapi.json');
        const res = await app.fetch(req);
        if (!res.ok) {
            throw new Error(
                `OpenAPI endpoint returned ${res.status} ${res.statusText}\n` +
                    '  This indicates a problem with the Hono app configuration'
            );
        }
        // Parse JSON response
        let spec: any;
        try {
            spec = await res.json();
        } catch (err) {
            throw new Error(
                'OpenAPI endpoint returned invalid JSON\n' +
                    `  Response status: ${res.status}\n` +
                    `  Response type: ${res.headers.get('content-type')}`
            );
        }
        // Validate spec structure before writing anything
        if (!spec || typeof spec !== 'object') {
            throw new Error('OpenAPI spec is not an object');
        }
        if (!spec.openapi) {
            throw new Error('OpenAPI spec missing "openapi" version field');
        }
        if (!spec.paths || typeof spec.paths !== 'object') {
            throw new Error('OpenAPI spec missing "paths" object');
        }
        const routeCount = Object.keys(spec.paths).length;
        // Trailing newline keeps the file byte-comparable with editors/CI
        const newContent = JSON.stringify(spec, null, 2) + '\n';
        console.log(`✓ Generated OpenAPI spec (${routeCount} routes)`);
        // Check mode: verify file is up-to-date (string equality, not semantic)
        if (CHECK_MODE) {
            if (!fs.existsSync(OUTPUT_PATH)) {
                console.error(`\n❌ OpenAPI docs file not found`);
                console.error(`  Expected: ${path.relative(process.cwd(), OUTPUT_PATH)}`);
                console.error('  Run: pnpm run sync-openapi-docs\n');
                process.exit(1);
            }
            let existingContent: string;
            try {
                existingContent = fs.readFileSync(OUTPUT_PATH, 'utf-8');
            } catch (err) {
                throw new Error(
                    `Failed to read existing OpenAPI docs file\n` +
                        `  Path: ${OUTPUT_PATH}\n` +
                        `  Error: ${err instanceof Error ? err.message : String(err)}`
                );
            }
            if (existingContent !== newContent) {
                console.error('\n❌ OpenAPI docs are out of sync!');
                console.error(`  File: ${path.relative(process.cwd(), OUTPUT_PATH)}`);
                console.error('  Run: pnpm run sync-openapi-docs\n');
                process.exit(1);
            }
            console.log('✅ OpenAPI docs are up-to-date!\n');
            process.exit(0);
        }
        // Sync mode: write the file (create parent directories as needed)
        const outputDir = path.dirname(OUTPUT_PATH);
        try {
            fs.mkdirSync(outputDir, { recursive: true });
        } catch (err) {
            throw new Error(
                `Failed to create output directory\n` +
                    `  Path: ${outputDir}\n` +
                    `  Error: ${err instanceof Error ? err.message : String(err)}`
            );
        }
        try {
            fs.writeFileSync(OUTPUT_PATH, newContent, 'utf-8');
        } catch (err) {
            throw new Error(
                `Failed to write OpenAPI docs file\n` +
                    `  Path: ${OUTPUT_PATH}\n` +
                    `  Error: ${err instanceof Error ? err.message : String(err)}`
            );
        }
        console.log(`✅ Synced to: ${path.relative(process.cwd(), OUTPUT_PATH)}`);
        console.log(`  Routes: ${routeCount}`);
        console.log(`  Version: ${spec.openapi}\n`);
        process.exit(0);
    } catch (error) {
        console.error('\n❌ Failed to sync OpenAPI docs\n');
        if (error instanceof Error) {
            console.error(error.message);
        } else {
            console.error(String(error));
        }
        console.error('');
        process.exit(1);
    }
}
// Run immediately when executed
syncOpenAPISpec();

View File

@@ -0,0 +1,244 @@
#!/usr/bin/env tsx
/**
* Installs the dexto CLI globally using a local npm registry (verdaccio).
* This mimics exactly what `npm install -g dexto` does when published to npm.
*
* Process:
* 1. Start verdaccio (local npm registry) in background
* 2. Publish all @dexto/* packages and dexto CLI to it
* 3. Install dexto globally from local registry (npm resolves deps like production)
* 4. Stop verdaccio and clean up
*
* This ensures peer dependencies resolve correctly through the dependency tree,
* exactly as they would when users install from npm.
*/
import { execSync, spawn, ChildProcess } from 'child_process';
import { existsSync, rmSync, mkdirSync, writeFileSync } from 'fs';
import { join } from 'path';
// Local verdaccio registry endpoint and the scratch directory for its state
const REGISTRY_URL = 'http://localhost:4873';
const VERDACCIO_CONFIG_DIR = join(process.cwd(), '.verdaccio');
// Packages in dependency order (dependencies first) — publish order matters
// so each package's deps already exist in the registry when it is published.
const PACKAGES = [
    { name: '@dexto/analytics', path: 'packages/analytics' },
    { name: '@dexto/core', path: 'packages/core' },
    { name: '@dexto/registry', path: 'packages/registry' },
    { name: '@dexto/tools-filesystem', path: 'packages/tools-filesystem' },
    { name: '@dexto/tools-process', path: 'packages/tools-process' },
    { name: '@dexto/tools-todo', path: 'packages/tools-todo' },
    { name: '@dexto/image-local', path: 'packages/image-local' },
    { name: '@dexto/agent-management', path: 'packages/agent-management' },
    { name: '@dexto/server', path: 'packages/server' },
    { name: 'dexto', path: 'packages/cli' },
];
// Handle to the running verdaccio child process (null when not running)
let verdaccioProcess: ChildProcess | null = null;

/** Resolve after roughly `ms` milliseconds. */
const sleep = (ms: number): Promise<void> =>
    new Promise((resolve) => setTimeout(resolve, ms));
/**
 * Poll the local registry until it answers HTTP requests.
 * Any HTTP response (even an error status) counts as "up" — we only need the
 * socket to accept connections before publishing.
 *
 * Uses the built-in fetch (Node 18+) instead of shelling out to `curl`
 * as the original did: no external binary dependency, and works on Windows.
 *
 * @param maxAttempts number of polls, 500ms apart (default 30 ≈ 15s)
 * @returns true once the registry responds, false if it never comes up
 */
async function waitForRegistry(maxAttempts = 30): Promise<boolean> {
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        try {
            await fetch(REGISTRY_URL);
            return true;
        } catch {
            // Connection refused — registry not up yet; back off and retry
            await sleep(500);
        }
    }
    return false;
}
/**
 * Write a minimal verdaccio config and spawn the registry via npx.
 * Anonymous publish is allowed for @dexto/* and dexto; every other package
 * is proxied through to the public npm registry. Only warn/error lines from
 * verdaccio's own output are echoed to keep the console quiet.
 */
function startVerdaccio(): ChildProcess {
    console.log('🚀 Starting local npm registry (verdaccio)...');
    // Create minimal config for verdaccio
    mkdirSync(VERDACCIO_CONFIG_DIR, { recursive: true });
    const configPath = join(VERDACCIO_CONFIG_DIR, 'config.yaml');
    // NOTE: YAML indentation inside this template literal is significant.
    const config = `
storage: ${join(VERDACCIO_CONFIG_DIR, 'storage')}
auth:
  htpasswd:
    file: ${join(VERDACCIO_CONFIG_DIR, 'htpasswd')}
    max_users: -1
uplinks:
  npmjs:
    url: https://registry.npmjs.org/
packages:
  '@dexto/*':
    access: $all
    publish: $anonymous
    unpublish: $anonymous
  'dexto':
    access: $all
    publish: $anonymous
    unpublish: $anonymous
  '**':
    access: $all
    proxy: npmjs
server:
  keepAliveTimeout: 60
log: { type: stdout, format: pretty, level: warn }
`;
    writeFileSync(configPath, config);
    const proc = spawn('npx', ['verdaccio', '--config', configPath], {
        stdio: ['ignore', 'pipe', 'pipe'],
        detached: false,
    });
    // Surface only problems from verdaccio's stdout
    proc.stdout?.on('data', (data) => {
        const str = data.toString();
        if (str.includes('warn') || str.includes('error')) {
            process.stdout.write(`  [verdaccio] ${str}`);
        }
    });
    // Suppress npm's own warnings on stderr; pass everything else through
    proc.stderr?.on('data', (data) => {
        const str = data.toString();
        if (!str.includes('npm warn')) {
            process.stderr.write(`  [verdaccio] ${str}`);
        }
    });
    return proc;
}
/** Stop the registry child process if it is running (idempotent). */
function stopVerdaccio() {
    if (!verdaccioProcess) {
        return;
    }
    console.log('🛑 Stopping local registry...');
    verdaccioProcess.kill('SIGTERM');
    verdaccioProcess = null;
}
/** Stop the registry and wipe its on-disk state (config, storage, htpasswd). */
function cleanup() {
    stopVerdaccio();
    const hasState = existsSync(VERDACCIO_CONFIG_DIR);
    if (hasState) {
        console.log('🧹 Cleaning up verdaccio data...');
        rmSync(VERDACCIO_CONFIG_DIR, { recursive: true, force: true });
    }
}
/**
 * Publish one workspace package to the local verdaccio registry.
 * Uses `pnpm publish` so `workspace:*` dependency ranges are rewritten to the
 * actual versions. "Already published" conflicts are swallowed so reruns are
 * idempotent; any other failure is rethrown.
 *
 * @param pkg package name and repo-relative directory
 */
function publishPackage(pkg: { name: string; path: string }) {
    console.log(`  📤 Publishing ${pkg.name}...`);
    try {
        // Temporary .npmrc with fake auth for the local registry — verdaccio
        // is configured for anonymous publish, but npm/pnpm insist on a token.
        const pkgDir = join(process.cwd(), pkg.path);
        const npmrcPath = join(pkgDir, '.npmrc');
        const npmrcContent = `//localhost:4873/:_authToken="fake-token-for-local-testing"\n`;
        writeFileSync(npmrcPath, npmrcContent);
        try {
            execSync(`pnpm publish --registry ${REGISTRY_URL} --no-git-checks`, {
                cwd: pkgDir,
                stdio: ['ignore', 'ignore', 'pipe'],
            });
        } finally {
            // Clean up the temporary .npmrc
            if (existsSync(npmrcPath)) {
                rmSync(npmrcPath);
            }
        }
    } catch (error: unknown) {
        // Was `catch (error: any)` — narrow instead of disabling the checker.
        // execSync failures are Error objects carrying the child's stderr.
        const stderr =
            error instanceof Error && 'stderr' in error
                ? String((error as Error & { stderr?: unknown }).stderr ?? '')
                : '';
        // Ignore "already published" errors; rethrow everything else
        if (!stderr.includes('cannot publish over')) {
            throw error;
        }
        console.log(`    (already published, skipping)`);
    }
}
/**
 * Orchestrates the full flow: start verdaccio, publish every workspace
 * package to it, remove any existing global dexto install (npm or pnpm),
 * then `npm install -g dexto` from the local registry — exactly mimicking a
 * production install. Registry state is cleaned up on every exit path.
 */
async function main() {
    const rootDir = process.cwd();
    // Ensure we're in the right directory (repo root, not a package dir)
    if (!existsSync(join(rootDir, 'packages/cli/package.json'))) {
        console.error('❌ Must run from repository root');
        process.exit(1);
    }
    // Clean up any previous state (stale registry storage would mask publishes)
    cleanup();
    // Register cleanup handlers so Ctrl+C / kill never leak the registry
    process.on('SIGINT', () => {
        cleanup();
        process.exit(1);
    });
    process.on('SIGTERM', () => {
        cleanup();
        process.exit(1);
    });
    process.on('exit', cleanup);
    try {
        // Start verdaccio
        verdaccioProcess = startVerdaccio();
        // Wait for registry to be ready
        console.log('  ⏳ Waiting for registry to start...');
        const ready = await waitForRegistry();
        if (!ready) {
            throw new Error('Verdaccio failed to start');
        }
        console.log('  ✓ Registry ready');
        // Publish all packages (PACKAGES is in dependency order)
        console.log('📦 Publishing packages to local registry...');
        for (const pkg of PACKAGES) {
            publishPackage(pkg);
        }
        console.log('  ✓ All packages published');
        // Uninstall existing global dexto (both npm and pnpm)
        console.log('🗑️ Removing existing global dexto...');
        let removedAny = false;
        try {
            execSync('npm uninstall -g dexto', { stdio: 'ignore' });
            console.log('  ✓ Removed npm global installation');
            removedAny = true;
        } catch {
            // npm global not installed
        }
        try {
            // Remove pnpm global link if it exists
            const pnpmBinDir = execSync('pnpm bin -g', { encoding: 'utf-8' }).trim();
            const pnpmDextoPath = join(pnpmBinDir, 'dexto');
            if (existsSync(pnpmDextoPath)) {
                rmSync(pnpmDextoPath, { force: true });
                console.log('  ✓ Removed pnpm global link');
                removedAny = true;
            }
        } catch {
            // pnpm not available or no global link
        }
        if (!removedAny) {
            console.log('  (no existing installation)');
        }
        // Install from local registry — npm resolves the dependency tree
        // exactly as it would against the public registry
        console.log('📥 Installing dexto globally from local registry...');
        execSync(`npm install -g dexto --registry ${REGISTRY_URL}`, {
            stdio: 'inherit',
        });
        console.log('');
        console.log('✅ Successfully installed dexto globally!');
        console.log('  Run "dexto --help" to get started');
    } catch (error) {
        console.error('❌ Installation failed:', error);
        process.exit(1);
    } finally {
        cleanup();
    }
}
// Run immediately when executed
main();

102
dexto/scripts/quality-checks.sh Executable file
View File

@@ -0,0 +1,102 @@
#!/bin/bash
# Quality Checks Script
# Runs individual or all quality checks with minimal output on success
#
# Usage:
# ./quality-checks.sh build - Run build, show last 200 lines on failure (default)
# ./quality-checks.sh build 100 - Run build, show last 100 lines on failure
# ./quality-checks.sh build all - Run build, show all output on failure
# ./quality-checks.sh test - Run tests, show last 200 lines on failure (default)
# ./quality-checks.sh test 50 - Run tests, show last 50 lines on failure
# ./quality-checks.sh test all - Run tests, show all output on failure
# ./quality-checks.sh lint - Run lint, show last 200 lines on failure (default)
# ./quality-checks.sh lint all - Run lint, show all output on failure
# ./quality-checks.sh typecheck - Run typecheck, show last 200 lines on failure (default)
# ./quality-checks.sh typecheck 150 - Run typecheck, show last 150 lines on failure
# ./quality-checks.sh typecheck all - Run typecheck, show all output on failure
# ./quality-checks.sh all - Run all checks in order (default: 200 lines)
# ./quality-checks.sh all 100 - Run all checks, show last 100 lines on failure
# ./quality-checks.sh all all - Run all checks, show all output on failure
set -e
# Helper function to run a check with output captured
# Run one named quality check, capturing all output; print it only on failure.
#   $1 - command to run (intentionally unquoted below so its args word-split)
#   $2 - human-readable check name
#   $3 - number of trailing lines to show on failure, or "all" for everything
# Exits the script with status 1 when the check fails.
run_check() {
    local cmd="$1"
    local name="$2"
    local output_lines="$3"
    local tmpdir="/tmp/build"
    local tmpfile

    # Ensure temp directory exists
    mkdir -p "$tmpdir"
    # mktemp instead of a predictable "$$"-suffixed name: /tmp/build may be
    # shared between users/runs, so avoid collisions and symlink tricks.
    tmpfile=$(mktemp "${tmpdir}/dexto-${name}.XXXXXX")

    # Run once, capture all output
    if $cmd > "$tmpfile" 2>&1; then
        # Success - clean up and report
        rm -f "$tmpfile"
        echo "${name} passed"
        return 0
    else
        # Failure - show output and clean up
        echo "${name} failed:"
        echo ""
        if [ "$output_lines" = "all" ]; then
            cat "$tmpfile"
        else
            tail -n "$output_lines" "$tmpfile"
        fi
        rm -f "$tmpfile"
        exit 1
    fi
}
# Parse command arguments
CHECK_TYPE="${1:-all}"
OUTPUT_LINES="${2:-200}"

# Validate OUTPUT_LINES is either "all" or numeric
if [ "$OUTPUT_LINES" != "all" ] && ! [[ "$OUTPUT_LINES" =~ ^[0-9]+$ ]]; then
    echo "Error: OUTPUT_LINES must be a number or 'all', got '$OUTPUT_LINES'" >&2
    exit 1
fi

# Dispatch to the requested check; "all" runs every check in dependency order.
case "$CHECK_TYPE" in
    build)
        run_check "pnpm run build" "Build" "$OUTPUT_LINES"
        ;;
    test)
        run_check "pnpm test" "Tests" "$OUTPUT_LINES"
        ;;
    lint)
        run_check "pnpm run lint" "Lint" "$OUTPUT_LINES"
        ;;
    typecheck)
        run_check "pnpm run typecheck" "Typecheck" "$OUTPUT_LINES"
        ;;
    openapi-docs)
        run_check "pnpm run sync-openapi-docs:check" "OpenAPI Docs" "$OUTPUT_LINES"
        ;;
    all)
        run_check "pnpm run build" "Build" "$OUTPUT_LINES"
        run_check "pnpm run sync-openapi-docs:check" "OpenAPI Docs" "$OUTPUT_LINES"
        run_check "pnpm test" "Tests" "$OUTPUT_LINES"
        run_check "pnpm run lint" "Lint" "$OUTPUT_LINES"
        run_check "pnpm run typecheck" "Typecheck" "$OUTPUT_LINES"
        echo ""
        echo "All quality checks passed! ✨"
        ;;
    *)
        # Usage errors go to stderr, consistent with the OUTPUT_LINES check above.
        echo "Error: Unknown check type '$CHECK_TYPE'" >&2
        echo "" >&2
        echo "Usage: $0 {build|test|lint|typecheck|openapi-docs|all} [lines|all]" >&2
        echo "Examples:" >&2
        echo "  $0 build       - Show last 200 lines on failure (default)" >&2
        echo "  $0 build 100   - Show last 100 lines on failure" >&2
        echo "  $0 build all   - Show all output on failure" >&2
        exit 1
        ;;
esac

265
dexto/scripts/test_api.sh Executable file
View File

@@ -0,0 +1,265 @@
#!/usr/bin/env bash
set -euo pipefail
# Target server; first script argument overrides the default local port.
BASE_URL=${1:-"http://localhost:3001"}

# Shared ANSI wrapper: $1 is the SGR color code, $2 the text to colorize.
_color() { printf "\033[%sm%s\033[0m" "$1" "$2"; }
cyan()   { _color 36 "$1"; }
green()  { _color 32 "$1"; }
red()    { _color 31 "$1"; }
yellow() { _color 33 "$1"; }
# Execute one HTTP test case and pretty-print the outcome.
#   $1 name  $2 HTTP method  $3 URL path  $4 expected status code
#   $5 optional JSON request body (sent with Content-Type: application/json)
# Returns non-zero when the observed status differs from the expected one,
# so callers can tally failures with `run_test ... || failures=$((...))`.
run_test() {
    local name="$1" method="$2" path="$3" expected_code="$4" data="${5:-}"
    local url="${BASE_URL}${path}"
    local resp http_code resp_body
    if [[ -n "${data}" ]]; then
        resp=$(curl -sS -H 'Content-Type: application/json' -d "${data}" -X "${method}" "${url}" -w "\n%{http_code}")
    else
        resp=$(curl -sS -X "${method}" "${url}" -w "\n%{http_code}")
    fi
    # curl -w appended "\n<status>"; split on the LAST newline so that
    # multi-line response bodies survive intact.
    http_code=${resp##*$'\n'}
    resp_body=${resp%$'\n'*}
    local status
    if [[ "${http_code}" == "${expected_code}" ]]; then
        status=$(green "PASS")
    else
        status=$(red "FAIL")
    fi
    echo "$(cyan "[${status}]") ${name}"
    echo " Method: ${method} URL: ${url}"
    if [[ -n "${data}" ]]; then
        echo " Payload: ${data}"
    fi
    echo " Expected: ${expected_code} Got: ${http_code}"
    echo " Body: ${resp_body}" | sed 's/^/ /'
    echo
    if [[ "${http_code}" != "${expected_code}" ]]; then
        return 1
    fi
}
# Drive the full API smoke-test suite against ${BASE_URL}.
# Failures are counted rather than aborting (each run_test is guarded with
# `|| failures=$((failures+1))` — note `set -e` is active at file level),
# and the script exits non-zero if any test failed.
main() {
    echo "Running API tests against ${BASE_URL}"; echo
    local failures=0
    run_test "GET /health" GET "/health" 200 || failures=$((failures+1))
    # Catalog replaces legacy providers endpoint
    run_test "GET /api/llm/catalog" GET "/api/llm/catalog" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog?provider=openai,anthropic" GET "/api/llm/catalog?provider=openai,anthropic" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog?router=vercel" GET "/api/llm/catalog?router=vercel" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog?fileType=audio" GET "/api/llm/catalog?fileType=audio" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog?defaultOnly=true" GET "/api/llm/catalog?defaultOnly=true" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog?mode=flat" GET "/api/llm/catalog?mode=flat" 200 || failures=$((failures+1))
    run_test "GET /api/llm/catalog" GET "/api/llm/catalog" 200 || failures=$((failures+1))
    run_test "GET /api/llm/current" GET "/api/llm/current" 200 || failures=$((failures+1))
    # LLM switch scenarios
    run_test "POST /api/llm/switch empty" POST "/api/llm/switch" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/llm/switch model wrong type" POST "/api/llm/switch" 400 '{"model":123}' || failures=$((failures+1))
    run_test "POST /api/llm/switch unknown provider" POST "/api/llm/switch" 400 '{"provider":"unknown_vendor"}' || failures=$((failures+1))
    # Router-only tweak should be allowed
    run_test "POST /api/llm/switch router only" POST "/api/llm/switch" 200 '{"router":"vercel"}' || failures=$((failures+1))
    run_test "POST /api/llm/switch valid openai" POST "/api/llm/switch" 200 '{"provider":"openai","model":"gpt-5"}' || failures=$((failures+1))
    run_test "POST /api/llm/switch session not found" POST "/api/llm/switch" 404 '{"model":"gpt-5","sessionId":"does-not-exist-123"}' || failures=$((failures+1))
    # Test missing API key scenario by using empty API key
    run_test "POST /api/llm/switch missing API key" POST "/api/llm/switch" 400 '{"provider":"cohere","apiKey":""}' || failures=$((failures+1))
    # -------- Advanced LLM switching checks (stateful) --------
    # Utilities: JSON parsing helpers (prefer jq, fallback to node)
    # json_get JSON EXPR -> prints the value at a jq-style path (e.g. '.config.provider'),
    # or an empty string when the path is missing or neither jq nor node is available.
    json_get() {
        local json="$1" expr="$2"
        if command -v jq >/dev/null 2>&1; then
            echo "${json}" | jq -r "${expr}" 2>/dev/null || echo ""
        elif command -v node >/dev/null 2>&1; then
            node -e "let s='';process.stdin.on('data',c=>s+=c).on('end',()=>{try{const o=JSON.parse(s);const pick=(o,e)=>e.split('.').slice(1).reduce((a,k)=>a?.[k],o);const v=pick(o, process.argv[1]);if(v==null) return console.log('');console.log(typeof v==='object'?JSON.stringify(v):String(v));}catch{console.log('')}});" "${expr}" <<< "${json}"
        else
            echo "" # No jq/node available; return empty to keep tests running
        fi
    }
    echo "$(yellow '[Stateful]') Router-only update preserves other fields"
    # Pre-validate catalog content for openai provider structure
    cat_before=$(curl -sS "${BASE_URL}/api/llm/catalog")
    env_var_before=$(json_get "${cat_before}" '.providers.openai.primaryEnvVar')
    supports_base_before=$(json_get "${cat_before}" '.providers.openai.supportsBaseURL')
    routers_before=$(json_get "${cat_before}" '.providers.openai.supportedRouters')
    if [ "${env_var_before}" != "OPENAI_API_KEY" ]; then
        echo "$(red 'FAIL') catalog.openai.primaryEnvVar expected OPENAI_API_KEY, got: ${env_var_before}"; failures=$((failures+1))
    fi
    # Validate provider filter (only openai + anthropic present)
    cat_filtered=$(curl -sS "${BASE_URL}/api/llm/catalog?provider=openai,anthropic")
    if echo "${cat_filtered}" | grep -q '"google"'; then
        echo "$(red 'FAIL') provider filter returned unexpected provider 'google'"; failures=$((failures+1))
    fi
    # Validate router filter (all providers must include router)
    cat_router=$(curl -sS "${BASE_URL}/api/llm/catalog?router=vercel")
    # quick sanity check: ensure each provider advertises vercel
    for p in openai anthropic google groq cohere xai; do
        adv=$(json_get "${cat_router}" ".providers.${p}.supportedRouters")
        if [ -n "${adv}" ] && ! echo "${adv}" | grep -q "vercel"; then
            echo "$(red 'FAIL') provider ${p} missing 'vercel' in router filter"; failures=$((failures+1))
        fi
    done
    # Validate defaultOnly
    cat_defaults=$(curl -sS "${BASE_URL}/api/llm/catalog?defaultOnly=true")
    # verify that for openai (if present) all models are default=true
    if echo "${cat_defaults}" | grep -q '"openai"'; then
        defaults_list=$(json_get "${cat_defaults}" '.providers.openai.models')
        if echo "${defaults_list}" | grep -q '"default": false'; then
            echo "$(red 'FAIL') defaultOnly returned non-default model for openai"; failures=$((failures+1))
        fi
    fi
    # Validate flat mode response shape
    flat_resp=$(curl -sS "${BASE_URL}/api/llm/catalog?mode=flat")
    flat_first=$(json_get "${flat_resp}" '.models[0].provider')
    if [ -z "${flat_first}" ]; then
        echo "$(red 'FAIL') flat mode missing models array or provider field"; failures=$((failures+1))
    fi
    if [ "${supports_base_before}" != "false" ]; then
        echo "$(red 'FAIL') catalog.openai.supportsBaseURL expected false, got: ${supports_base_before}"; failures=$((failures+1))
    fi
    if ! echo "${routers_before}" | grep -q "vercel"; then
        echo "$(red 'FAIL') catalog.openai.supportedRouters missing 'vercel'"; failures=$((failures+1))
    fi
    # Get baseline config (used below to verify a router-only switch leaves
    # provider/model/maxIterations/temperature untouched)
    base_resp=$(curl -sS "${BASE_URL}/api/llm/current")
    base_provider=$(json_get "${base_resp}" '.config.provider')
    base_model=$(json_get "${base_resp}" '.config.model')
    base_router=$(json_get "${base_resp}" '.config.router')
    base_max_iter=$(json_get "${base_resp}" '.config.maxIterations')
    base_temp=$(json_get "${base_resp}" '.config.temperature')
    # Determine a target router for this provider
    cat_for_router=$(curl -sS "${BASE_URL}/api/llm/catalog")
    routers=$(json_get "${cat_for_router}" ".providers.${base_provider}.supportedRouters")
    # Pick the other router if available; otherwise reuse current
    target_router="${base_router}"
    if echo "${routers}" | grep -q "in-built" && [ "${base_router}" != "in-built" ]; then
        target_router="in-built"
    elif echo "${routers}" | grep -q "vercel" && [ "${base_router}" != "vercel" ]; then
        target_router="vercel"
    fi
    # Perform router-only switch
    switch_payload=$(printf '{"router":"%s"}' "${target_router}")
    run_test "POST /api/llm/switch router-only -> ${target_router}" POST "/api/llm/switch" 200 "${switch_payload}" || failures=$((failures+1))
    # Verify post-switch config
    after_resp=$(curl -sS "${BASE_URL}/api/llm/current")
    after_provider=$(json_get "${after_resp}" '.config.provider')
    after_model=$(json_get "${after_resp}" '.config.model')
    after_router=$(json_get "${after_resp}" '.config.router')
    after_max_iter=$(json_get "${after_resp}" '.config.maxIterations')
    after_temp=$(json_get "${after_resp}" '.config.temperature')
    if [ "${after_provider}" != "${base_provider}" ] || [ "${after_model}" != "${base_model}" ]; then
        echo "$(red 'FAIL') provider/model changed unexpectedly on router-only switch"; failures=$((failures+1))
    fi
    if [ "${after_router}" != "${target_router}" ]; then
        echo "$(red 'FAIL') router not updated to target (${target_router}); actual: ${after_router}"; failures=$((failures+1))
    fi
    if [ "${after_max_iter}" != "${base_max_iter}" ]; then
        echo "$(red 'FAIL') maxIterations changed unexpectedly (${base_max_iter} -> ${after_max_iter})"; failures=$((failures+1))
    fi
    if [ "${after_temp}" != "${base_temp}" ]; then
        echo "$(red 'FAIL') temperature changed unexpectedly (${base_temp} -> ${after_temp})"; failures=$((failures+1))
    fi
    # -------- New LLM key APIs (only invalid input cases; avoid mutating .env) --------
    run_test "POST /api/llm/key invalid provider" POST "/api/llm/key" 400 '{"provider":"invalid","apiKey":"x"}' || failures=$((failures+1))
    run_test "POST /api/llm/key missing apiKey" POST "/api/llm/key" 400 '{"provider":"openai","apiKey":""}' || failures=$((failures+1))
    # Revert router to baseline for isolation
    if [ "${after_router}" != "${base_router}" ]; then
        revert_payload=$(printf '{"router":"%s"}' "${base_router}")
        run_test "POST /api/llm/switch revert router -> ${base_router}" POST "/api/llm/switch" 200 "${revert_payload}" || failures=$((failures+1))
    fi
    # Message endpoints (basic validation)
    run_test "POST /api/message no data" POST "/api/message" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/message-sync no data" POST "/api/message-sync" 400 '{}' || failures=$((failures+1))
    # Reset endpoint
    run_test "POST /api/reset valid" POST "/api/reset" 200 '{}' || failures=$((failures+1))
    # Agent configuration endpoints
    run_test "GET /api/agent/path" GET "/api/agent/path" 200 || failures=$((failures+1))
    run_test "GET /api/agent/config" GET "/api/agent/config" 200 || failures=$((failures+1))
    run_test "GET /api/agent/config/export" GET "/api/agent/config/export" 200 || failures=$((failures+1))
    # Agent validation tests (200 with an error payload for bad YAML; 400 only
    # when the yaml field itself is missing)
    run_test "POST /api/agent/validate missing yaml" POST "/api/agent/validate" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/agent/validate invalid YAML syntax" POST "/api/agent/validate" 200 '{"yaml":"invalid: yaml: content: ["}' || failures=$((failures+1))
    run_test "POST /api/agent/validate invalid schema" POST "/api/agent/validate" 200 '{"yaml":"greeting: hello\nllm:\n provider: invalid_provider"}' || failures=$((failures+1))
    run_test "POST /api/agent/validate valid YAML" POST "/api/agent/validate" 200 '{"yaml":"greeting: \"Test Agent\"\nllm:\n provider: openai\n model: gpt-5\n apiKey: $OPENAI_API_KEY"}' || failures=$((failures+1))
    # Agent config save tests (validation only - avoid mutating actual config)
    run_test "POST /api/agent/config missing yaml" POST "/api/agent/config" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/agent/config invalid YAML syntax" POST "/api/agent/config" 400 '{"yaml":"invalid: yaml: ["}' || failures=$((failures+1))
    run_test "POST /api/agent/config invalid schema" POST "/api/agent/config" 400 '{"yaml":"llm:\n provider: invalid_provider"}' || failures=$((failures+1))
    # Session endpoints
    run_test "GET /api/sessions" GET "/api/sessions" 200 || failures=$((failures+1))
    run_test "POST /api/sessions create" POST "/api/sessions" 201 '{"sessionId":"test-session-123"}' || failures=$((failures+1))
    run_test "GET /api/sessions/test-session-123" GET "/api/sessions/test-session-123" 200 || failures=$((failures+1))
    run_test "POST /api/sessions/test-session-123/load" POST "/api/sessions/test-session-123/load" 200 '{}' || failures=$((failures+1))
    run_test "GET /api/sessions/current" GET "/api/sessions/current" 200 || failures=$((failures+1))
    run_test "GET /api/sessions/test-session-123/history" GET "/api/sessions/test-session-123/history" 200 || failures=$((failures+1))
    # Search endpoints validation
    run_test "GET /api/search/messages no query" GET "/api/search/messages" 400 || failures=$((failures+1))
    run_test "GET /api/search/sessions no query" GET "/api/search/sessions" 400 || failures=$((failures+1))
    run_test "GET /api/search/messages with query" GET "/api/search/messages?q=test" 200 || failures=$((failures+1))
    run_test "GET /api/search/sessions with query" GET "/api/search/sessions?q=test" 200 || failures=$((failures+1))
    # MCP endpoints validation
    run_test "GET /api/mcp/servers" GET "/api/mcp/servers" 200 || failures=$((failures+1))
    run_test "POST /api/mcp/servers no data" POST "/api/mcp/servers" 400 '{}' || failures=$((failures+1))
    # Webhook endpoints validation
    run_test "POST /api/webhooks no data" POST "/api/webhooks" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/webhooks invalid URL" POST "/api/webhooks" 400 '{"url":"not-a-url"}' || failures=$((failures+1))
    run_test "POST /api/webhooks valid" POST "/api/webhooks" 201 '{"url":"https://example.com/webhook"}' || failures=$((failures+1))
    run_test "GET /api/webhooks" GET "/api/webhooks" 200 || failures=$((failures+1))
    # Memory endpoints
    run_test "GET /api/memory" GET "/api/memory" 200 || failures=$((failures+1))
    run_test "POST /api/memory no data" POST "/api/memory" 400 '{}' || failures=$((failures+1))
    run_test "POST /api/memory create" POST "/api/memory" 201 '{"content":"test memory content"}' || failures=$((failures+1))
    # Prompts endpoints
    run_test "GET /api/prompts" GET "/api/prompts" 200 || failures=$((failures+1))
    # Resources endpoints
    run_test "GET /api/resources" GET "/api/resources" 200 || failures=$((failures+1))
    # Greeting endpoint
    run_test "GET /api/greeting" GET "/api/greeting" 200 || failures=$((failures+1))
    # A2A discovery
    run_test "GET /.well-known/agent-card.json" GET "/.well-known/agent-card.json" 200 || failures=$((failures+1))
    # OpenAPI schema
    run_test "GET /openapi.json" GET "/openapi.json" 200 || failures=$((failures+1))
    # Cleanup test data
    run_test "DELETE /api/sessions/test-session-123" DELETE "/api/sessions/test-session-123" 200 || failures=$((failures+1))
    if [[ ${failures} -eq 0 ]]; then
        echo "$(green "All tests passed")"
        exit 0
    else
        echo "$(red "${failures} test(s) failed")"
        exit 1
    fi
}
main "$@"