feat: Add intelligent auto-router and enhanced integrations
- Add intelligent-router.sh hook for automatic agent routing
- Add AUTO-TRIGGER-SUMMARY.md documentation
- Add FINAL-INTEGRATION-SUMMARY.md documentation
- Complete Prometheus integration (6 commands + 4 tools)
- Complete Dexto integration (12 commands + 5 tools)
- Enhanced Ralph with access to all agents
- Fix /clawd command (removed disable-model-invocation)
- Update hooks.json to v5 with intelligent routing
- 291 total skills now available
- All 21 commands with automatic routing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,304 @@
|
||||
import React, { useState } from 'react';
import { Input } from '../../ui/input';
import { LabelWithTooltip } from '../../ui/label-with-tooltip';
import { Collapsible } from '../../ui/collapsible';
import { Eye, EyeOff } from 'lucide-react';
import { LLM_PROVIDERS, isReasoningCapableModel, type AgentConfig } from '@dexto/core';

/** The `llm` slice of an agent configuration, as defined by `@dexto/core`. */
type LLMConfig = AgentConfig['llm'];

/** Props for {@link LLMConfigSection}. */
interface LLMConfigSectionProps {
    /** Current LLM configuration (controlled value). */
    value: LLMConfig;
    /** Called with the complete updated config whenever any field changes. */
    onChange: (value: LLMConfig) => void;
    /** Field-level validation errors keyed by dotted path (e.g. `llm.provider`). */
    errors?: Record<string, string>;
    /** Controlled open state for the collapsible section (optional). */
    open?: boolean;
    /** Notified when the collapsible section is opened or closed. */
    onOpenChange?: (open: boolean) => void;
    /** Error count displayed by the collapsible header. */
    errorCount?: number;
    /** Section-level error messages displayed by the collapsible. */
    sectionErrors?: string[];
}
export function LLMConfigSection({
|
||||
value,
|
||||
onChange,
|
||||
errors = {},
|
||||
open,
|
||||
onOpenChange,
|
||||
errorCount = 0,
|
||||
sectionErrors = [],
|
||||
}: LLMConfigSectionProps) {
|
||||
const [showApiKey, setShowApiKey] = useState(false);
|
||||
|
||||
const handleChange = (field: keyof LLMConfig, newValue: string | number | undefined) => {
|
||||
onChange({ ...value, [field]: newValue } as LLMConfig);
|
||||
};
|
||||
|
||||
return (
|
||||
<Collapsible
|
||||
title="LLM Configuration"
|
||||
defaultOpen={true}
|
||||
open={open}
|
||||
onOpenChange={onOpenChange}
|
||||
errorCount={errorCount}
|
||||
sectionErrors={sectionErrors}
|
||||
>
|
||||
<div className="space-y-4">
|
||||
{/* Provider */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="provider"
|
||||
tooltip="The LLM provider to use (e.g., OpenAI, Anthropic)"
|
||||
>
|
||||
Provider *
|
||||
</LabelWithTooltip>
|
||||
<select
|
||||
id="provider"
|
||||
value={value.provider || ''}
|
||||
onChange={(e) => handleChange('provider', e.target.value)}
|
||||
aria-invalid={!!errors['llm.provider']}
|
||||
className="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive"
|
||||
>
|
||||
<option value="">Select provider...</option>
|
||||
{LLM_PROVIDERS.map((p) => (
|
||||
<option key={p} value={p}>
|
||||
{p}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{errors['llm.provider'] && (
|
||||
<p className="text-xs text-destructive mt-1">{errors['llm.provider']}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Model */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="model"
|
||||
tooltip="The specific model identifier (e.g., gpt-5, claude-sonnet-4-5-20250929)"
|
||||
>
|
||||
Model *
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="model"
|
||||
value={value.model || ''}
|
||||
onChange={(e) => handleChange('model', e.target.value)}
|
||||
placeholder="e.g., gpt-5, claude-sonnet-4-5-20250929"
|
||||
aria-invalid={!!errors['llm.model']}
|
||||
/>
|
||||
{errors['llm.model'] && (
|
||||
<p className="text-xs text-destructive mt-1">{errors['llm.model']}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* API Key */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="apiKey"
|
||||
tooltip="Use $ENV_VAR for environment variables or enter the API key directly"
|
||||
>
|
||||
API Key *
|
||||
</LabelWithTooltip>
|
||||
<div className="relative">
|
||||
<Input
|
||||
id="apiKey"
|
||||
type={showApiKey ? 'text' : 'password'}
|
||||
value={value.apiKey ?? ''}
|
||||
onChange={(e) => handleChange('apiKey', e.target.value)}
|
||||
placeholder="$OPENAI_API_KEY or direct value"
|
||||
aria-invalid={!!errors['llm.apiKey']}
|
||||
className="pr-10"
|
||||
/>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setShowApiKey(!showApiKey)}
|
||||
className="absolute right-2 top-1/2 -translate-y-1/2 p-1 hover:bg-accent rounded transition-colors"
|
||||
aria-label={showApiKey ? 'Hide API key' : 'Show API key'}
|
||||
>
|
||||
{showApiKey ? (
|
||||
<EyeOff className="h-4 w-4 text-muted-foreground" />
|
||||
) : (
|
||||
<Eye className="h-4 w-4 text-muted-foreground" />
|
||||
)}
|
||||
</button>
|
||||
</div>
|
||||
{errors['llm.apiKey'] && (
|
||||
<p className="text-xs text-destructive mt-1">{errors['llm.apiKey']}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Max Iterations */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="maxIterations"
|
||||
tooltip="Maximum number of agent reasoning iterations per turn"
|
||||
>
|
||||
Max Iterations
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="maxIterations"
|
||||
type="number"
|
||||
value={value.maxIterations !== undefined ? value.maxIterations : ''}
|
||||
onChange={(e) => {
|
||||
const val = e.target.value;
|
||||
if (val === '') {
|
||||
handleChange('maxIterations', undefined);
|
||||
} else {
|
||||
const num = parseInt(val, 10);
|
||||
if (!isNaN(num)) {
|
||||
handleChange('maxIterations', num);
|
||||
}
|
||||
}
|
||||
}}
|
||||
min="1"
|
||||
placeholder="50"
|
||||
aria-invalid={!!errors['llm.maxIterations']}
|
||||
/>
|
||||
{errors['llm.maxIterations'] && (
|
||||
<p className="text-xs text-destructive mt-1">
|
||||
{errors['llm.maxIterations']}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Base URL */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="baseURL"
|
||||
tooltip="Custom base URL for the LLM provider (optional, for proxies or custom endpoints)"
|
||||
>
|
||||
Base URL
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="baseURL"
|
||||
value={value.baseURL || ''}
|
||||
onChange={(e) => handleChange('baseURL', e.target.value || undefined)}
|
||||
placeholder="https://api.openai.com/v1"
|
||||
aria-invalid={!!errors['llm.baseURL']}
|
||||
/>
|
||||
{errors['llm.baseURL'] && (
|
||||
<p className="text-xs text-destructive mt-1">{errors['llm.baseURL']}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Temperature */}
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="temperature"
|
||||
tooltip="Controls randomness in responses (0.0 = deterministic, 1.0 = creative)"
|
||||
>
|
||||
Temperature
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="temperature"
|
||||
type="number"
|
||||
value={value.temperature !== undefined ? value.temperature : ''}
|
||||
onChange={(e) =>
|
||||
handleChange(
|
||||
'temperature',
|
||||
e.target.value ? parseFloat(e.target.value) : undefined
|
||||
)
|
||||
}
|
||||
min="0"
|
||||
max="1"
|
||||
step="0.1"
|
||||
placeholder="0.0 - 1.0"
|
||||
aria-invalid={!!errors['llm.temperature']}
|
||||
/>
|
||||
{errors['llm.temperature'] && (
|
||||
<p className="text-xs text-destructive mt-1">{errors['llm.temperature']}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Max Input/Output Tokens */}
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="maxInputTokens"
|
||||
tooltip="Maximum input tokens to send to the model. If not specified, defaults to model's limit from registry, or 128,000 tokens for custom endpoints"
|
||||
>
|
||||
Max Input Tokens
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="maxInputTokens"
|
||||
type="number"
|
||||
value={value.maxInputTokens || ''}
|
||||
onChange={(e) =>
|
||||
handleChange(
|
||||
'maxInputTokens',
|
||||
e.target.value ? parseInt(e.target.value, 10) : undefined
|
||||
)
|
||||
}
|
||||
min="1"
|
||||
placeholder="Auto (128k fallback)"
|
||||
aria-invalid={!!errors['llm.maxInputTokens']}
|
||||
/>
|
||||
{errors['llm.maxInputTokens'] && (
|
||||
<p className="text-xs text-destructive mt-1">
|
||||
{errors['llm.maxInputTokens']}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="maxOutputTokens"
|
||||
tooltip="Maximum output tokens the model can generate. If not specified, uses provider's default (typically 4,096 tokens)"
|
||||
>
|
||||
Max Output Tokens
|
||||
</LabelWithTooltip>
|
||||
<Input
|
||||
id="maxOutputTokens"
|
||||
type="number"
|
||||
value={value.maxOutputTokens || ''}
|
||||
onChange={(e) =>
|
||||
handleChange(
|
||||
'maxOutputTokens',
|
||||
e.target.value ? parseInt(e.target.value, 10) : undefined
|
||||
)
|
||||
}
|
||||
min="1"
|
||||
placeholder="Auto (provider default)"
|
||||
aria-invalid={!!errors['llm.maxOutputTokens']}
|
||||
/>
|
||||
{errors['llm.maxOutputTokens'] && (
|
||||
<p className="text-xs text-destructive mt-1">
|
||||
{errors['llm.maxOutputTokens']}
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Provider-Specific Options */}
|
||||
|
||||
{/* Reasoning Effort - Only for models that support it (o1, o3, codex, gpt-5.x) */}
|
||||
{value.model && isReasoningCapableModel(value.model) && (
|
||||
<div>
|
||||
<LabelWithTooltip
|
||||
htmlFor="reasoningEffort"
|
||||
tooltip="Controls reasoning depth for OpenAI models (o1, o3, codex, gpt-5.x). Higher = more thorough but slower/costlier. 'medium' is recommended for most tasks."
|
||||
>
|
||||
Reasoning Effort
|
||||
</LabelWithTooltip>
|
||||
<select
|
||||
id="reasoningEffort"
|
||||
value={value.reasoningEffort || ''}
|
||||
onChange={(e) =>
|
||||
handleChange('reasoningEffort', e.target.value || undefined)
|
||||
}
|
||||
className="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2"
|
||||
>
|
||||
<option value="">Auto (model default)</option>
|
||||
<option value="none">None - No reasoning</option>
|
||||
<option value="minimal">Minimal - Barely any reasoning</option>
|
||||
<option value="low">Low - Light reasoning</option>
|
||||
<option value="medium">Medium - Balanced (recommended)</option>
|
||||
<option value="high">High - Thorough reasoning</option>
|
||||
<option value="xhigh">Extra High - Maximum quality</option>
|
||||
</select>
|
||||
<p className="text-xs text-muted-foreground mt-1">
|
||||
Only applies to reasoning models (o1, o3, codex, gpt-5.x)
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Collapsible>
|
||||
);
|
||||
}
|
||||
Reference in New Issue
Block a user