Files
PromptArch/lib/enhance-engine.ts
admin a4b7a0d9e4 feat: v1.3.0 — plan-first workflow, OpenRouter provider, enhanced prompt engine
Major changes:
- Plan-first workflow: AI generates structured plan before code, with
  plan review card (Modify Plan / Start Coding / Skip to Code)
- Post-coding UX: Preview + Request Modifications buttons after code gen
- OpenRouter integration: 4th AI provider with 20+ model support
- Enhanced prompt engine: 9 strategies, 11+ intent patterns, modular
- PLAN MODE system prompt block in all 4 services
- Fixed stale React closure in approveAndGenerate with isApproval flag
- Fixed canvas auto-opening during plan phase with wasIdle gate
- Updated README, CHANGELOG, .env.example, version bump to 1.3.0
2026-03-18 18:45:37 +00:00

973 lines
35 KiB
TypeScript

/**
* Prompt Enhancement Engine
* Based on prompt-master methodology (https://github.com/nidhinjs/prompt-master)
* Client-side prompt analysis and optimization for various AI tools
*/
// ============================================================================
// TYPE DEFINITIONS
// ============================================================================

/**
 * Tool categories with different prompting requirements
 */
export type ToolCategory =
  | 'reasoning' // Claude, GPT-4o, Gemini - Full structure, XML tags, explicit format locks
  | 'thinking' // o1, o3, DeepSeek-R1 - Short clean instructions only, no CoT
  | 'openweight' // Llama, Mistral, Qwen - Shorter prompts, simpler structure
  | 'agentic' // Claude Code, Devin, SWE-agent - Start/target state, allowed/forbidden actions, stop conditions
  | 'ide' // Cursor, Windsurf, Copilot - File path + function + desired change + scope lock
  | 'fullstack' // Bolt, v0, Lovable - Stack spec, component boundaries, what NOT to scaffold
  | 'image' // Midjourney, DALL-E, Stable Diffusion - Subject + style + mood + lighting + negative prompts
  | 'search'; // Perplexity, SearchGPT - Mode specification, citation requirements

/**
 * Template frameworks for different prompt structures
 */
export type TemplateFramework =
  | 'RTF' // Role, Task, Format - Simple one-shot
  | 'CO-STAR' // Context, Objective, Style, Tone, Audience, Response - Professional documents
  | 'RISEN' // Role, Instructions, Steps, End Goal, Narrowing - Complex multi-step
  | 'CRISPE' // Capacity, Role, Insight, Statement, Personality, Experiment - Creative work
  | 'ChainOfThought' // Logic/math/debugging (NOT for thinking models)
  | 'FewShot' // Format-sensitive tasks
  | 'FileScope' // IDE AI editing
  | 'ReActPlusStop' // Agentic AI
  | 'VisualDescriptor'; // Image generation

/**
 * Severity levels for diagnostic patterns (ordered most to least severe)
 */
export type Severity = 'critical' | 'warning' | 'info';

/**
 * Diagnostic pattern for prompt analysis.
 * Each pattern describes one prompt anti-pattern plus a heuristic detector.
 */
export interface DiagnosticPattern {
  id: string; // stable identifier, e.g. 'task-001'
  name: string; // short human-readable label
  description: string; // what the anti-pattern is
  category: 'task' | 'context' | 'format' | 'scope' | 'reasoning' | 'agentic';
  detect: (prompt: string) => boolean; // true when the anti-pattern is present in the prompt
  fix: string; // suggested remedy, surfaced to the user verbatim
  severity: Severity;
}

/**
 * Result from running diagnostics on a prompt
 */
export interface DiagnosticResult {
  pattern: DiagnosticPattern; // the pattern that fired
  detected: boolean; // always true for results returned by runDiagnostics
  severity: Severity; // copied from pattern.severity for convenient sorting
  suggestion: string; // copied from pattern.fix
}

/**
 * Template structure with metadata
 */
export interface Template {
  name: string; // display name, e.g. 'RTF (Role-Task-Format)'
  framework: TemplateFramework;
  description: string;
  structure: string[]; // ordered sections the user should fill in
  bestFor: ToolCategory[]; // tool categories this template suits
}

/**
 * Complete analysis report for a prompt
 */
export interface AnalysisReport {
  prompt: string; // the original prompt text, unmodified
  tokenEstimate: number; // rough token count, see estimateTokens
  suggestedTool: ToolCategory | null; // null when no tool could be inferred
  suggestedTemplate: Template | null; // null when no template matched
  diagnostics: DiagnosticResult[]; // fired patterns, sorted critical-first
  missingDimensions: string[]; // human-readable dimension names, e.g. 'Context'
  overallScore: number; // 0-100
}
// ============================================================================
// TOOL CATEGORIES
// ============================================================================

/**
 * Metadata for each tool category: what it is, example products, and the
 * prompting style that works best for it. Keys mirror the ToolCategory union.
 */
export const TOOL_CATEGORIES: Record<ToolCategory, {
  description: string;
  examples: string[];
  promptingStyle: string;
}> = {
  reasoning: {
    description: 'Models with strong reasoning capabilities',
    examples: ['Claude', 'GPT-4o', 'Gemini'],
    promptingStyle: 'Full structure, XML tags, explicit format locks, detailed instructions'
  },
  thinking: {
    description: 'Models with built-in chain-of-thought',
    examples: ['o1', 'o3', 'DeepSeek-R1'],
    promptingStyle: 'Short clean instructions only, NO explicit CoT or step-by-step'
  },
  openweight: {
    description: 'Open-source models',
    examples: ['Llama', 'Mistral', 'Qwen'],
    promptingStyle: 'Shorter prompts, simpler structure, clear direct instructions'
  },
  agentic: {
    description: 'Autonomous coding agents',
    examples: ['Claude Code', 'Devin', 'SWE-agent'],
    promptingStyle: 'Start/target state, allowed/forbidden actions, stop conditions'
  },
  ide: {
    description: 'IDE-integrated AI assistants',
    examples: ['Cursor', 'Windsurf', 'Copilot'],
    promptingStyle: 'File path + function + desired change + scope lock'
  },
  fullstack: {
    description: 'Full-stack app builders',
    examples: ['Bolt', 'v0', 'Lovable'],
    promptingStyle: 'Stack spec, component boundaries, what NOT to scaffold'
  },
  image: {
    description: 'Image generation models',
    examples: ['Midjourney', 'DALL-E', 'Stable Diffusion'],
    promptingStyle: 'Subject + style + mood + lighting + negative prompts'
  },
  search: {
    description: 'Search-augmented AI',
    examples: ['Perplexity', 'SearchGPT'],
    promptingStyle: 'Mode specification, citation requirements, source attribution'
  }
};
// ============================================================================
// TEMPLATE FRAMEWORKS
// ============================================================================

/**
 * Catalog of prompt templates. selectTemplate looks entries up by their
 * `framework` field, so each framework appears at most once here.
 */
export const TEMPLATES: Template[] = [
  {
    name: 'RTF (Role-Task-Format)',
    framework: 'RTF',
    description: 'Simple one-shot prompts with clear role, task, and output format',
    structure: ['Role: Who you are', 'Task: What to do', 'Format: How to output'],
    bestFor: ['reasoning', 'openweight']
  },
  {
    name: 'CO-STAR',
    framework: 'CO-STAR',
    description: 'Comprehensive framework for professional documents and complex tasks',
    structure: [
      'Context: Background information',
      'Objective: What needs to be achieved',
      'Style: Writing style and tone',
      'Tone: Emotional tone',
      'Audience: Who will read this',
      'Response: Expected output format'
    ],
    bestFor: ['reasoning', 'thinking', 'openweight']
  },
  {
    name: 'RISEN',
    framework: 'RISEN',
    description: 'Multi-step complex task framework with clear end goals',
    structure: [
      'Role: AI agent identity',
      'Instructions: Task requirements',
      'Steps: Sequential actions',
      'End Goal: Success criteria',
      'Narrowing: Constraints and boundaries'
    ],
    bestFor: ['reasoning', 'agentic']
  },
  {
    name: 'CRISPE',
    framework: 'CRISPE',
    description: 'Creative work framework with personality and experimentation',
    structure: [
      'Capacity: What you can do',
      'Role: Creative identity',
      'Insight: Key perspective',
      'Statement: The core request',
      'Personality: Tone and style',
      'Experiment: Creative constraints'
    ],
    bestFor: ['reasoning', 'openweight']
  },
  {
    name: 'Chain of Thought',
    framework: 'ChainOfThought',
    description: 'Step-by-step reasoning for logic, math, and debugging (NOT for thinking models)',
    structure: [
      'Problem statement',
      'Step-by-step reasoning',
      'Final answer',
      'Verification'
    ],
    bestFor: ['reasoning', 'openweight']
  },
  {
    name: 'Few-Shot Learning',
    framework: 'FewShot',
    description: 'Provide examples to guide format-sensitive tasks',
    structure: [
      'Task description',
      'Example 1: Input -> Output',
      'Example 2: Input -> Output',
      'Example 3: Input -> Output',
      'Actual task'
    ],
    bestFor: ['reasoning', 'openweight']
  },
  {
    name: 'File-Scope Lock',
    framework: 'FileScope',
    description: 'IDE-specific editing with precise file and function targeting',
    structure: [
      'File path',
      'Function/component name',
      'Current code snippet',
      'Desired change',
      'Scope: ONLY modify X, do NOT touch Y'
    ],
    bestFor: ['ide']
  },
  {
    name: 'ReAct + Stop Conditions',
    framework: 'ReActPlusStop',
    description: 'Agentic framework with explicit stopping rules',
    structure: [
      'Starting state: Current situation',
      'Target state: Desired outcome',
      'Allowed actions: What you CAN do',
      'Forbidden actions: What you CANNOT do',
      'Stop conditions: When to pause and ask',
      'Output requirements: Progress reporting'
    ],
    bestFor: ['agentic']
  },
  {
    name: 'Visual Descriptor',
    framework: 'VisualDescriptor',
    description: 'Comprehensive image generation prompt structure',
    structure: [
      'Subject: Main element',
      'Style: Art style or aesthetic',
      'Mood: Emotional quality',
      'Lighting: Light source and quality',
      'Composition: Framing and perspective',
      'Colors: Color palette',
      'Negative prompts: What to exclude'
    ],
    bestFor: ['image']
  }
];
// ============================================================================
// DIAGNOSTIC PATTERNS (35 Total)
// ============================================================================

// Task-definition anti-patterns: vague, compound, unbounded, or under-specified
// task statements. All detectors are heuristic regex checks over the raw prompt.
const TASK_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'task-001',
    name: 'Vague task verb',
    description: 'Uses generic verbs like "help", "fix", "make" without specifics',
    category: 'task',
    detect: (prompt: string) => {
      const vagueVerbs = /\b(help|fix|make|improve|update|change|handle|work on)\b/i;
      // NOTE(review): the trailing ':' means this only matches e.g. "named:" —
      // plain "to"/"that" never count as specifics. Looks like it may have been
      // intended as a bare \b; confirm intent before changing.
      const noSpecifics = !/\b(specifically|exactly|to|that|which|called|named):\b/i.test(prompt);
      // Only short prompts (< 30 words) are flagged.
      return vagueVerbs.test(prompt) && noSpecifics && prompt.split(' ').length < 30;
    },
    fix: 'Replace vague verbs with specific action verbs. Instead of "fix this", use "add error handling to the login function"',
    severity: 'warning'
  },
  {
    id: 'task-002',
    name: 'Two tasks in one',
    description: 'Contains multiple distinct tasks in a single prompt',
    category: 'task',
    detect: (prompt: string) => {
      // Deliberately aggressive: any "and <word>" style conjunction fires.
      const andPattern = /\b(and|also|plus|additionally)\s+[a-z]+\b/i;
      const commaTasks = /\b(create|build|fix|add|write|update)[^,.]+,[^,.]+(create|build|fix|add|write|update)/i;
      return andPattern.test(prompt) || commaTasks.test(prompt);
    },
    fix: 'Split into separate prompts. Each prompt should have ONE primary task.',
    severity: 'critical'
  },
  {
    id: 'task-003',
    name: 'No success criteria',
    description: 'Missing clear definition of when the task is complete',
    category: 'task',
    detect: (prompt: string) => {
      const successWords = /\b(done when|success criteria|complete when|should|must result|verify that|ensure that|passes when)\b/i;
      const isComplexTask = /\b(build|create|implement|develop|design|setup)\b/i.test(prompt);
      return isComplexTask && !successWords.test(prompt);
    },
    fix: 'Add explicit success criteria: "The task is complete when [specific condition is met]"',
    severity: 'warning'
  },
  {
    id: 'task-004',
    name: 'Over-permissive agent',
    description: 'Gives AI too much freedom without constraints',
    category: 'task',
    detect: (prompt: string) => {
      const permissivePhrases = /\b(whatever it takes|do your best|figure it out|you decide|however you want|as you see fit)\b/i;
      return permissivePhrases.test(prompt);
    },
    fix: 'Replace open-ended permissions with specific constraints and scope boundaries.',
    severity: 'critical'
  },
  {
    id: 'task-005',
    name: 'Emotional task description',
    description: 'Uses emotional language without specific technical details',
    category: 'task',
    detect: (prompt: string) => {
      const emotionalWords = /\b(broken|mess|terrible|awful|doesn't work|horrible|stupid|hate|frustrating)\b/i;
      // Fires only when NO concrete technical vocabulary accompanies the venting.
      const noTechnicalDetails = !/\b(error|bug|line|function|file|exception|fail|crash)\b/i.test(prompt);
      return emotionalWords.test(prompt) && noTechnicalDetails;
    },
    fix: 'Replace emotional language with specific technical details: what error, what line, what behavior?',
    severity: 'warning'
  },
  {
    id: 'task-006',
    name: 'Build-the-whole-thing',
    description: 'Attempts to build an entire project in one prompt',
    category: 'task',
    detect: (prompt: string) => {
      const wholeProjectPhrases = /\b(entire app|whole project|full website|complete system|everything|end to end|from scratch)\b/i;
      return wholeProjectPhrases.test(prompt);
    },
    fix: 'Break down into smaller, iterative prompts. Start with core functionality, then add features.',
    severity: 'critical'
  },
  {
    id: 'task-007',
    name: 'Implicit reference',
    description: 'References something previously mentioned without context',
    category: 'task',
    detect: (prompt: string) => {
      const implicitRefs = /\b(the thing|that one|what we discussed|from before|the previous|like the other)\b/i;
      // Short prompts (< 50 words) are unlikely to contain the missing context.
      const noContext = prompt.split(' ').length < 50;
      return implicitRefs.test(prompt) && noContext;
    },
    fix: 'Always include full context. Replace "the thing" with specific name/description.',
    severity: 'critical'
  }
];
// Context anti-patterns: prompts that assume memory, omit the tech stack,
// the audience, or prior attempts.
const CONTEXT_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'ctx-001',
    name: 'Assumed prior knowledge',
    description: 'Assumes AI remembers previous conversations or context',
    category: 'context',
    detect: (prompt: string) => {
      const assumptionPhrases = /\b(continue|as before|like we said|you know|from our chat|from earlier)\b/i;
      // A longer prompt (>= 40 words) is assumed to carry its own context.
      const noContextProvided = prompt.split(' ').length < 40;
      return assumptionPhrases.test(prompt) && noContextProvided;
    },
    fix: 'Include relevant context from previous work. Do not assume continuity.',
    severity: 'warning'
  },
  {
    id: 'ctx-002',
    name: 'No project context',
    description: 'Very short prompt with no domain or technology context',
    category: 'context',
    detect: (prompt: string) => {
      const wordCount = prompt.split(/\s+/).length;
      const hasTech = /\b(javascript|python|react|api|database|server|frontend|backend|mobile|web)\b/i;
      return wordCount < 15 && !hasTech.test(prompt);
    },
    fix: 'Add project context: technology stack, domain, and what you\'re building.',
    severity: 'warning'
  },
  {
    id: 'ctx-003',
    name: 'Forgotten stack',
    description: 'Tech-agnostic prompt that implies an existing project',
    category: 'context',
    detect: (prompt: string) => {
      const projectWords = /\b(add to|update the|change the|modify the|existing|current)\b/i;
      const noTechStack = !/\b(javascript|typescript|python|java|rust|go|react|vue|angular|node|django|rails)\b/i.test(prompt);
      return projectWords.test(prompt) && noTechStack;
    },
    fix: 'Specify your technology stack: language, framework, and key dependencies.',
    severity: 'critical'
  },
  {
    id: 'ctx-004',
    name: 'Hallucination invite',
    description: 'Asks for general knowledge that may not exist',
    category: 'context',
    detect: (prompt: string) => {
      const hallucinationPhrases = /\b(what do experts say|what is commonly known|generally accepted|most people think|typical approach)\b/i;
      return hallucinationPhrases.test(prompt);
    },
    fix: 'Ask for specific sources or provide source material. Avoid general "what do X think" questions.',
    severity: 'info'
  },
  {
    id: 'ctx-005',
    name: 'Undefined audience',
    description: 'User-facing output without audience specification',
    category: 'context',
    detect: (prompt: string) => {
      const userFacing = /\b(write|create|generate|draft)\s+(content|message|email|copy|text|documentation)\b/i;
      const noAudience = !/\b(for|audience|target|reader|user|customer|stakeholder)\b/i.test(prompt);
      return userFacing.test(prompt) && noAudience;
    },
    fix: 'Specify who will read this output: "Write for [audience] who [context]"',
    severity: 'warning'
  },
  {
    id: 'ctx-006',
    name: 'No prior failures',
    description: 'Complex task without mentioning what was tried before',
    category: 'context',
    detect: (prompt: string) => {
      const complexTask = /\b(debug|fix|solve|resolve|implement|build|create)\b/i;
      const noPriorAttempts = !/\b(tried|attempted|already|previous|before|not working|failed)\b/i.test(prompt);
      // Only flag prompts long enough to plausibly describe prior attempts.
      const isLongPrompt = prompt.split(' ').length > 20;
      return complexTask.test(prompt) && noPriorAttempts && isLongPrompt;
    },
    fix: 'Mention what you\'ve already tried: "I tried X but got Y error. Now..."',
    severity: 'info'
  }
];
// Format anti-patterns: missing or vague output-format, length, role, and
// visual-style specifications.
const FORMAT_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'fmt-001',
    name: 'Missing output format',
    description: 'No specification of how output should be structured',
    category: 'format',
    detect: (prompt: string) => {
      // Fires only when NEITHER a concrete format word NOR an output directive appears.
      const formatKeywords = /\b(list|table|json|markdown|bullet|paragraph|csv|html|code|steps)\b/i;
      const outputKeywords = /\b(output|return|format as|in the form of|structure)\b/i;
      return !formatKeywords.test(prompt) && !outputKeywords.test(prompt);
    },
    fix: 'Specify output format: "Return as a bulleted list" or "Output as JSON"',
    severity: 'warning'
  },
  {
    id: 'fmt-002',
    name: 'Implicit length',
    description: 'Uses length terms without specific counts',
    category: 'format',
    detect: (prompt: string) => {
      const vagueLength = /\b(summary|description|overview|brief|short|long|detailed)\b/i;
      const noSpecificLength = !/\b(\d+\s*(words?|sentences?|paragraphs?)|under\s*\d+|max\s*\d+)\b/i.test(prompt);
      return vagueLength.test(prompt) && noSpecificLength;
    },
    fix: 'Be specific: "Write 2-3 sentences" or "Keep under 100 words"',
    severity: 'info'
  },
  {
    id: 'fmt-003',
    name: 'No role assignment',
    description: 'Long prompt without specifying who AI should be',
    category: 'format',
    detect: (prompt: string) => {
      const wordCount = prompt.split(/\s+/).length;
      const roleKeywords = /\b(act as|you are|role|persona|expert|specialist|professional|engineer|developer|analyst)\b/i;
      return wordCount > 50 && !roleKeywords.test(prompt);
    },
    fix: 'Add role assignment: "Act as a [role] with [expertise]"',
    severity: 'info'
  },
  {
    id: 'fmt-004',
    name: 'Vague aesthetic',
    description: 'Design-related prompt without specific visual direction',
    category: 'format',
    detect: (prompt: string) => {
      const vagueAesthetic = /\b(professional|clean|modern|nice|good looking|beautiful|sleek)\b/i;
      const noVisualSpecs = !/\b(colors?|fonts?|spacing|layout|style|theme|design system)\b/i.test(prompt);
      return vagueAesthetic.test(prompt) && noVisualSpecs;
    },
    fix: 'Specify visual details: colors, typography, spacing, specific design reference.',
    severity: 'warning'
  },
  {
    id: 'fmt-005',
    name: 'No negative prompts for image',
    description: 'Image generation without exclusion criteria',
    category: 'format',
    detect: (prompt: string) => {
      const imageKeywords = /\b(image|photo|picture|illustration|generate|create art|midjourney|dall-e)\b/i;
      const noNegative = !/\b(negative|exclude|avoid|without|no|not)\b/i.test(prompt);
      return imageKeywords.test(prompt) && noNegative;
    },
    fix: 'Add negative prompts: "Negative: blurry, low quality, distorted"',
    severity: 'warning'
  },
  {
    id: 'fmt-006',
    name: 'Prose for Midjourney',
    description: 'Long descriptive sentences instead of keyword-style prompts',
    category: 'format',
    detect: (prompt: string) => {
      // Any sentence longer than 10 words counts as prose.
      const longSentences = prompt.split(/[.!?]/).filter(s => s.trim().split(' ').length > 10).length > 0;
      const imageKeywords = /\b(image|photo|art|illustration|midjourney|dall-e|stable diffusion)\b/i;
      return imageKeywords.test(prompt) && longSentences;
    },
    fix: 'Use keyword-style prompts: "Subject, style, mood, lighting, --ar 16:9"',
    severity: 'warning'
  }
];
// Scope anti-patterns: prompts lacking boundaries, version pins, stop
// conditions, or file paths, and prompts that mismatch the target tool.
const SCOPE_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'scp-001',
    name: 'No scope boundary',
    description: 'Missing specific scope constraints',
    category: 'scope',
    detect: (prompt: string) => {
      const scopeWords = /\b(only|just|specifically|exactly|limit|restrict)\b/i;
      // BUG FIX: the path/function regexes were previously bare RegExp objects
      // (never executed with .test), so both were always truthy and this
      // pattern could never fire. They are now actually evaluated.
      const hasFilePath = /\/[\w.]+/.test(prompt) || /\b[\w-]+\.(js|ts|py|java|go|rs|cpp|c|h)\b/i.test(prompt);
      const hasFunction = /\b(function|method|class|component)\s+\w+/i.test(prompt);
      return !scopeWords.test(prompt) && !hasFilePath && !hasFunction;
    },
    fix: 'Add scope boundary: "Only modify X, do NOT touch Y"',
    severity: 'warning'
  },
  {
    id: 'scp-002',
    name: 'No stack constraints',
    description: 'Technical task without version specifications',
    category: 'scope',
    detect: (prompt: string) => {
      const techTask = /\b(build|create|implement|setup|install|use|add)\s+(\w+\s+){0,3}(app|api|server|database|system)\b/i;
      const noVersion = !/\b(version|v\d+|\d+\.\d+|specifically|exactly)\b/i.test(prompt);
      return techTask.test(prompt) && noVersion;
    },
    fix: 'Specify versions: "Use React 18 with TypeScript 5"',
    severity: 'warning'
  },
  {
    id: 'scp-003',
    name: 'No stop condition for agents',
    description: 'Agentic task without explicit stopping rules',
    category: 'scope',
    detect: (prompt: string) => {
      const agentKeywords = /\b(agent|autonomous|run this|execute|iterate|keep going)\b/i;
      const noStop = !/\b(stop|pause|ask me|check in|before continuing|confirm)\b/i.test(prompt);
      return agentKeywords.test(prompt) && noStop;
    },
    fix: 'Add stop conditions: "Stop and ask before deleting files" or "Pause after each major step"',
    severity: 'critical'
  },
  {
    id: 'scp-004',
    name: 'No file path for IDE',
    description: 'IDE editing without file specification',
    category: 'scope',
    detect: (prompt: string) => {
      const editKeywords = /\b(update|fix|change|modify|edit|refactor)\b/i;
      // BUG FIX: hasPath was a bare RegExp object (always truthy), so
      // !hasPath was always false and this pattern could never fire.
      const hasPath = /\/[\w./-]+|\b[\w-]+\.(js|ts|jsx|tsx|py|java|go|rs|cpp|c|h|css|html|json)\b/i.test(prompt);
      return editKeywords.test(prompt) && !hasPath;
    },
    fix: 'Always include file path: "Update src/components/Header.tsx"',
    severity: 'critical'
  },
  {
    id: 'scp-005',
    name: 'Wrong template',
    description: 'Template mismatch for the target tool',
    category: 'scope',
    detect: (prompt: string) => {
      // Detect if using complex structure for thinking models
      const thinkingModel = /\b(o1|o3|deepseek.*r1|thinking)\b/i;
      const complexStructure = /\b(step by step|think through|reasoning|<thinking>|chain of thought)\b/i;
      return thinkingModel.test(prompt) && complexStructure.test(prompt);
    },
    fix: 'For thinking models (o1, o3, R1), use short clean instructions without explicit CoT.',
    severity: 'critical'
  },
  {
    id: 'scp-006',
    name: 'Pasting codebase',
    description: 'Extremely long prompt suggesting codebase paste',
    category: 'scope',
    detect: (prompt: string) => {
      const wordCount = prompt.split(/\s+/).length;
      // More than two fenced blocks (> 4 backtick fences) implies multiple pasted files.
      const multipleFiles = (prompt.match(/```/g) || []).length > 4;
      return wordCount > 500 || multipleFiles;
    },
    fix: 'Use file paths and references instead of pasting entire files. Or use an IDE AI tool.',
    severity: 'warning'
  }
];
// Reasoning anti-patterns: missing or misapplied chain-of-thought, memory
// assumptions, contradictions, and ungrounded factual requests.
const REASONING_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'rsn-001',
    name: 'No CoT for logic',
    description: 'Complex logic task without step-by-step instructions',
    category: 'reasoning',
    detect: (prompt: string) => {
      const logicKeywords = /\b(compare|analyze|which is better|debug|why does|explain why|how does|verify)\b/i;
      const noCoT = !/\b(step by step|walk through|reasoning|think through|first|then|finally)\b/i.test(prompt);
      return logicKeywords.test(prompt) && noCoT;
    },
    fix: 'Add "Step by step" or "Walk through your reasoning" for logic tasks.',
    severity: 'warning'
  },
  {
    id: 'rsn-002',
    name: 'CoT on reasoning models',
    description: 'Explicit CoT instructions for thinking models',
    category: 'reasoning',
    detect: (prompt: string) => {
      // Thinking models reason internally; explicit CoT degrades results.
      const thinkingModel = /\b(o1|o3|deepseek.*r1)\b/i;
      const explicitCoT = /\b(step by step|think through|<thinking>|reasoning process|show your work)\b/i;
      return thinkingModel.test(prompt) && explicitCoT.test(prompt);
    },
    fix: 'Remove explicit CoT instructions. Thinking models have built-in reasoning.',
    severity: 'critical'
  },
  {
    id: 'rsn-003',
    name: 'Inter-session memory',
    description: 'Assumes AI remembers across separate sessions',
    category: 'reasoning',
    detect: (prompt: string) => {
      const memoryPhrases = /\b(you already know|remember|from our conversation|we discussed|earlier we|as mentioned)\b/i;
      return memoryPhrases.test(prompt);
    },
    fix: 'AI does not remember between sessions. Include all necessary context.',
    severity: 'info'
  },
  {
    id: 'rsn-004',
    name: 'Contradicting prior',
    description: 'Explicit contradiction of previous instructions',
    category: 'reasoning',
    detect: (prompt: string) => {
      const contradictionPhrases = /\b(actually|wait|ignore what i said|forget that|never mind|scratch that)\b/i;
      return contradictionPhrases.test(prompt);
    },
    fix: 'State corrections clearly: "Correction: Replace X with Y"',
    severity: 'warning'
  },
  {
    id: 'rsn-005',
    name: 'No grounding rule',
    description: 'Factual task without certainty constraints',
    category: 'reasoning',
    detect: (prompt: string) => {
      const factualTask = /\b(summarize|what is|tell me about|explain|list|research|find)\b/i;
      const noGrounding = !/\b(if unsure|don't hallucinate|only if certain|say i don't know|stick to)\b/i.test(prompt);
      // Very short questions are exempt (> 10 words required to flag).
      return factualTask.test(prompt) && noGrounding && prompt.split(' ').length > 10;
    },
    fix: 'Add grounding: "If uncertain, say so rather than guessing"',
    severity: 'info'
  }
];
// Agentic anti-patterns: autonomous-agent prompts missing start/target state,
// progress reporting, filesystem limits, or approval checkpoints.
const AGENTIC_PATTERNS: DiagnosticPattern[] = [
  {
    id: 'agt-001',
    name: 'No starting state',
    description: 'Build/create task without current state description',
    category: 'agentic',
    detect: (prompt: string) => {
      const buildKeywords = /\b(build|create|set up|implement|develop|make)\b/i;
      // NOTE: despite its name, currentState is TRUE when the prompt does NOT
      // mention a current state (the regex result is negated here).
      const currentState = !/\b(currently|existing|now|currently have|right now|starting from)\b/i.test(prompt);
      return buildKeywords.test(prompt) && currentState;
    },
    fix: 'Describe starting state: "Currently I have X. I want to reach Y."',
    severity: 'warning'
  },
  {
    id: 'agt-002',
    name: 'No target state',
    description: 'Agentic task without explicit deliverable',
    category: 'agentic',
    detect: (prompt: string) => {
      const vagueCompletion = /\b(work on this|handle this|do this|take care of)\b/i;
      const noTarget = !/\b(result should|final output|deliverable|end with|complete when)\b/i.test(prompt);
      return vagueCompletion.test(prompt) && noTarget;
    },
    fix: 'Specify target state: "The final result should be [specific outcome]"',
    severity: 'critical'
  },
  {
    id: 'agt-003',
    name: 'Silent agent',
    description: 'Multi-step task without progress reporting requirements',
    category: 'agentic',
    detect: (prompt: string) => {
      const multiStep = /\b(then|next|after that|first|second|finally)\b/i;
      const noOutput = !/\b(show me|report|output|print|log|display progress|tell me)\b/i.test(prompt);
      return multiStep.test(prompt) && noOutput;
    },
    fix: 'Add output requirements: "Report progress after each step"',
    severity: 'warning'
  },
  {
    id: 'agt-004',
    name: 'Unlocked filesystem',
    description: 'Agentic task without file access restrictions',
    category: 'agentic',
    detect: (prompt: string) => {
      const agentKeywords = /\b(agent|autonomous|run|execute|implement|build|create)\b/i;
      const noRestrictions = !/\b(only touch|don't modify|never delete|restrict to|scope|limit)\b/i.test(prompt);
      return agentKeywords.test(prompt) && noRestrictions;
    },
    fix: 'Add file restrictions: "Only modify files in X, never touch Y"',
    severity: 'critical'
  },
  {
    id: 'agt-005',
    name: 'No review trigger',
    description: 'Agentic task without approval checkpoints',
    category: 'agentic',
    detect: (prompt: string) => {
      const riskyActions = /\b(delete|remove|overwrite|deploy|publish|submit|merge)\b/i;
      const noReview = !/\b(ask before|confirm|review|approve|check with me)\b/i.test(prompt);
      return riskyActions.test(prompt) && noReview;
    },
    fix: 'Add review triggers: "Ask before deleting any files" or "Confirm before deploying"',
    severity: 'critical'
  }
];
/**
 * Flat registry of every diagnostic pattern, preserving category group order:
 * task, context, format, scope, reasoning, agentic.
 */
export const ALL_PATTERNS: DiagnosticPattern[] = [
  TASK_PATTERNS,
  CONTEXT_PATTERNS,
  FORMAT_PATTERNS,
  SCOPE_PATTERNS,
  REASONING_PATTERNS,
  AGENTIC_PATTERNS
].flat();
// ============================================================================
// CORE FUNCTIONS
// ============================================================================

/**
 * Auto-detect the target AI tool category based on prompt content.
 *
 * Explicit tool mentions are checked first, then the category is inferred
 * from content patterns (file extensions, CoT phrasing, visual vocabulary).
 *
 * @param prompt - Raw user prompt text.
 * @returns The detected category, or null when nothing matches.
 */
export function detectToolCategory(prompt: string): ToolCategory | null {
  const p = prompt.toLowerCase();

  // Check for specific tool mentions.
  // BUG FIX: 'claude code' must be tested BEFORE the generic 'claude' match,
  // otherwise every Claude Code prompt was misclassified as 'reasoning'.
  if (/(claude code|devin|swe.*agent|autonomous.*agent)/i.test(prompt)) return 'agentic';
  if (/(claude|gpt-4|gemini|gpt4)/i.test(prompt)) return 'reasoning';
  // \b guards stop 'o1'/'o3' from matching inside longer words.
  if (/(\bo1\b|\bo3\b|deepseek.*r1|thinking.*model)/i.test(prompt)) return 'thinking';
  if (/(llama|mistral|qwen|open.*weight|local.*model)/i.test(prompt)) return 'openweight';
  if (/(cursor|windsurf|copilot|ide.*ai|editor.*ai)/i.test(prompt)) return 'ide';
  if (/(bolt|v0|lovable|fullstack.*ai|app.*builder)/i.test(prompt)) return 'fullstack';
  if (/(midjourney|dall.?e|stable diffusion|image.*generate|create.*image|generate.*art)/i.test(prompt)) return 'image';
  if (/(perplexity|searchgpt|search.*ai|research.*mode)/i.test(prompt)) return 'search';

  // Infer from content patterns when no tool is named explicitly.
  if (/\.(js|ts|py|java|go|rs|cpp|c|h)\b/.test(prompt) && /\b(update|fix|change|modify)\b/.test(p)) return 'ide';
  if (/\b(build|create|set up|implement).*\b(app|api|server|system)\b/.test(p) && /\b(stop|pause|ask before)\b/.test(p)) return 'agentic';
  if (/\b(step by step|<thinking>|chain of thought|reasoning)\b/.test(p)) return 'reasoning';
  if (/\b(image|photo|art|illustration)\b/.test(p) && /\b(style|mood|lighting)\b/.test(p)) return 'image';

  return null;
}
/**
* Select the best template based on tool category and prompt analysis
*/
export function selectTemplate(prompt: string, toolCategory: ToolCategory | null): Template | null {
const p = prompt.toLowerCase();
// Image generation
if (toolCategory === 'image' || /\b(image|photo|art|illustration|midjourney|dall.?e)\b/.test(p)) {
return TEMPLATES.find(t => t.framework === 'VisualDescriptor') || null;
}
// IDE editing
if (toolCategory === 'ide' || (/\.(js|ts|py|java|go|rs)\b/.test(prompt) && /\b(update|fix|modify)\b/.test(p))) {
return TEMPLATES.find(t => t.framework === 'FileScope') || null;
}
// Agentic tasks
if (toolCategory === 'agentic' || /\b(build|create|set up).*\b(stop|pause|ask before)\b/.test(p)) {
return TEMPLATES.find(t => t.framework === 'ReActPlusStop') || null;
}
// Complex multi-step tasks
if (/\b(step|then|next|after|first|second|finally)\b/.test(p) && p.split(' ').length > 30) {
return TEMPLATES.find(t => t.framework === 'RISEN') || null;
}
// Logic/debugging tasks
if (/\b(debug|compare|analyze|which is better|why does|verify)\b/.test(p)) {
if (toolCategory !== 'thinking') {
return TEMPLATES.find(t => t.framework === 'ChainOfThought') || null;
}
}
// Professional documents
if (/\b(documentation|report|proposal|spec|requirements)\b/.test(p) && p.split(' ').length > 40) {
return TEMPLATES.find(t => t.framework === 'CO-STAR') || null;
}
// Creative work
if (/\b(creative|design|story|narrative|brand|voice)\b/.test(p)) {
return TEMPLATES.find(t => t.framework === 'CRISPE') || null;
}
// Format-sensitive tasks
if (/\b(example|sample|format|pattern|template)\b/.test(p)) {
return TEMPLATES.find(t => t.framework === 'FewShot') || null;
}
// Default to RTF for simple prompts
if (p.split(' ').length < 50) {
return TEMPLATES.find(t => t.framework === 'RTF') || null;
}
// Default for longer prompts
return TEMPLATES.find(t => t.framework === 'CO-STAR') || null;
}
/**
* Run all diagnostic patterns on a prompt
*/
export function runDiagnostics(prompt: string): DiagnosticResult[] {
const results: DiagnosticResult[] = [];
for (const pattern of ALL_PATTERNS) {
const detected = pattern.detect(prompt);
if (detected) {
results.push({
pattern,
detected: true,
severity: pattern.severity,
suggestion: pattern.fix
});
}
}
// Sort by severity (critical first)
const severityOrder = { critical: 0, warning: 1, info: 2 };
results.sort((a, b) => severityOrder[a.severity] - severityOrder[b.severity]);
return results;
}
/**
 * Estimate token count from word count.
 *
 * Rule of thumb: 1 token ≈ 0.75 English words (100 tokens ≈ 75 words),
 * so tokens ≈ words / 0.75.
 *
 * BUG FIX: the previous implementation multiplied by 0.75, which inverted
 * the heuristic and systematically underestimated token usage. Also treats
 * an empty/whitespace-only prompt as 0 words instead of 1.
 *
 * @param prompt - Raw user prompt text.
 * @returns Rough token estimate, rounded up.
 */
export function estimateTokens(prompt: string): number {
  const wordCount = prompt.split(/\s+/).filter(Boolean).length;
  return Math.ceil(wordCount / 0.75);
}
/**
 * Identify which standard prompt dimensions are absent from a prompt.
 *
 * Each dimension is checked with a case-insensitive keyword regex; the
 * returned names are human-readable labels used directly in the UI/report.
 * (Removed an unused `prompt.toLowerCase()` local — every regex already
 * carries the /i flag.)
 *
 * @param prompt - Raw user prompt text.
 * @returns Labels of the dimensions the prompt does not mention.
 */
export function identifyMissingDimensions(prompt: string): string[] {
  const missing: string[] = [];

  // Check for common dimensions
  if (!/\b(act as|you are|role|expert|specialist)\b/i.test(prompt)) {
    missing.push('Role/Identity');
  }
  if (!/\b(context|background|project|currently working)\b/i.test(prompt)) {
    missing.push('Context');
  }
  if (!/\b(format|output|return as|structure)\b/i.test(prompt)) {
    missing.push('Output Format');
  }
  if (!/\b(success|complete when|done when|verify|ensure)\b/i.test(prompt)) {
    missing.push('Success Criteria');
  }
  // Scope only matters for prompts long enough to need boundaries (> 20 words).
  if (!/\b(only|just|limit|restrict|scope)\b/i.test(prompt) && prompt.split(' ').length > 20) {
    missing.push('Scope Boundaries');
  }
  // Stack only matters when the prompt is clearly about code.
  if (!/\b(javascript|python|react|node|typescript|java|rust|go)\b/i.test(prompt) &&
      /\b(code|function|class|app|api)\b/i.test(prompt)) {
    missing.push('Technology Stack');
  }

  return missing;
}
/**
* Calculate overall prompt quality score (0-100)
*/
export function calculateScore(diagnostics: DiagnosticResult[], missingDimensions: string[]): number {
let score = 100;
// Deduct for diagnostics
for (const d of diagnostics) {
switch (d.severity) {
case 'critical': score -= 15; break;
case 'warning': score -= 8; break;
case 'info': score -= 3; break;
}
}
// Deduct for missing dimensions
score -= missingDimensions.length * 5;
return Math.max(0, Math.min(100, score));
}
/**
 * Generate a comprehensive analysis report for a prompt by composing every
 * analysis step: tool detection, template selection, diagnostics, missing
 * dimensions, token estimate, and the overall 0-100 score.
 */
export function generateAnalysisReport(prompt: string): AnalysisReport {
  const diagnostics = runDiagnostics(prompt);
  const missingDimensions = identifyMissingDimensions(prompt);
  const suggestedTool = detectToolCategory(prompt);

  return {
    prompt,
    tokenEstimate: estimateTokens(prompt),
    suggestedTool,
    suggestedTemplate: selectTemplate(prompt, suggestedTool),
    diagnostics,
    missingDimensions,
    overallScore: calculateScore(diagnostics, missingDimensions)
  };
}
/**
 * Get human-readable tool category description.
 */
export function getToolDescription(category: ToolCategory): string {
  const { description } = TOOL_CATEGORIES[category];
  return description;
}
/**
 * Get the recommended prompting style for a tool category.
 */
export function getPromptingStyle(category: ToolCategory): string {
  const { promptingStyle } = TOOL_CATEGORIES[category];
  return promptingStyle;
}
/**
 * Get all diagnostic patterns belonging to one category.
 */
export function getPatternsByCategory(category: DiagnosticPattern['category']): DiagnosticPattern[] {
  const matches: DiagnosticPattern[] = [];
  for (const pattern of ALL_PATTERNS) {
    if (pattern.category === category) {
      matches.push(pattern);
    }
  }
  return matches;
}
/**
 * Look up a diagnostic pattern by its stable id (e.g. 'task-001').
 * Returns undefined when no pattern carries that id.
 */
export function getPatternById(id: string): DiagnosticPattern | undefined {
  for (const pattern of ALL_PATTERNS) {
    if (pattern.id === id) {
      return pattern;
    }
  }
  return undefined;
}