TUI5: Added 4 new feature modules - Session Memory, Context Manager, Skills Library, Debug Logger

Author: Gemini AI
Date: 2025-12-14 20:35:11 +04:00
Parent: 9e83d9d2c2
Commit: d252957dd2
5 changed files with 930 additions and 92 deletions


@@ -43,6 +43,17 @@ import ThinkingBlock from './ui/components/ThinkingBlock.mjs';
import ChatBubble from './ui/components/ChatBubble.mjs';
import TodoList from './ui/components/TodoList.mjs';
// ═══════════════════════════════════════════════════════════════
// NEW FEATURE MODULES - Inspired by Mini-Agent, original implementation
// ═══════════════════════════════════════════════════════════════
import { getSessionMemory } from '../lib/session-memory.mjs';
import { getContextManager } from '../lib/context-manager.mjs';
import { getAllSkills, getSkill, executeSkill, getSkillListDisplay } from '../lib/skills.mjs';
import { getDebugLogger, initFromArgs } from '../lib/debug-logger.mjs';
// Initialize debug logger from CLI args
const debugLogger = initFromArgs();
const { useState, useCallback, useEffect, useRef, useMemo } = React;

// Custom hook for terminal dimensions (replaces ink-use-stdout-dimensions)
@@ -1051,8 +1062,8 @@ const SmoothCounter = ({ value }) => {
};

// Component: EnhancedTypewriterText - Improved text reveal with batching and adaptive speed
const EnhancedTypewriterText = ({
  children,
  speed = 25,
  batchSize = 1 // Default to 1 for safety, can be increased for batching
}) => {
@@ -1084,10 +1095,10 @@ const EnhancedTypewriterText = ({
  // Calculate batch size (may be smaller near the end)
  const remaining = fullText.length - positionRef.current;
  const currentBatchSize = Math.min(batchSize, remaining);

  // Get the next batch of characters
  const nextBatch = fullText.substring(positionRef.current, positionRef.current + currentBatchSize);

  // Update display and position
  setDisplayText(prev => prev + nextBatch);
  positionRef.current += currentBatchSize;
@@ -3362,106 +3373,106 @@ This gives the user a chance to refine requirements before implementation.
const fullPrompt = systemPrompt + '\n\n[USER REQUEST]\n' + fullText;
let fullResponse = '';

// PROVIDER SWITCH: Use OpenCode Free or Qwen based on provider state
const streamStartTime = Date.now();   // Track start time for this request
let totalCharsReceived = 0;           // Track total characters for speed calculation

const result = provider === 'opencode-free'
  ? await callOpenCodeFree(fullPrompt, freeModel, (chunk) => {
      const cleanChunk = chunk.replace(/[\u001b\u009b][[\]()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, '');

      // IMPROVED STREAM SPLITTING LOGIC (Thinking vs Content)
      // Claude Code style: cleaner separation of thinking from response
      const lines = cleanChunk.split('\n');
      let isThinkingChunk = false;

      // Enhanced heuristics for better Claude-like thinking detection
      const trimmedChunk = cleanChunk.trim();
      if (/^(Let me|Now let me|I'll|I need to|I should|I notice|I can|I will|Thinking:|Analyzing|Considering|Checking|Looking|Planning|First|Next|Finally)/i.test(trimmedChunk)) {
        isThinkingChunk = true;
      } else if (/^```|# |Here is|```|```|```/i.test(trimmedChunk)) {
        // If we encounter code blocks or headers, likely content not thinking
        isThinkingChunk = false;
      }

      // Update character count for speed calculation
      totalCharsReceived += cleanChunk.length;

      // Calculate current streaming speed (chars per second)
      const elapsedSeconds = (Date.now() - streamStartTime) / 1000;
      const speed = elapsedSeconds > 0 ? Math.round(totalCharsReceived / elapsedSeconds) : 0;

      // GLOBAL STATS UPDATE (Run for ALL chunks)
      setThinkingStats(prev => ({
        ...prev,
        chars: totalCharsReceived,
        speed: speed
      }));

      // GLOBAL AGENT DETECTION (Run for ALL chunks)
      const agentMatch = cleanChunk.match(/\[AGENT:\s*([^\]]+)\]/i);
      if (agentMatch) {
        setThinkingStats(prev => ({ ...prev, activeAgent: agentMatch[1].trim() }));
      }

      if (isThinkingChunk) {
        setThinkingLines(prev => [...prev, ...lines.map(l => l.trim()).filter(l => l && !/^(Let me|Now let me|I'll|I need to|I notice)/i.test(l.trim()))]);
      } else {
        setMessages(prev => {
          const last = prev[prev.length - 1];
          if (last && last.role === 'assistant') {
            return [...prev.slice(0, -1), { ...last, content: last.content + cleanChunk }];
          }
          return prev;
        });
      }
    })
  : await getQwen().sendMessage(fullPrompt, 'qwen-coder-plus', null, (chunk) => {
      const cleanChunk = chunk.replace(/[\u001b\u009b][[\]()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, '');

      // IMPROVED STREAM SPLITTING LOGIC (Thinking vs Content)
      const lines = cleanChunk.split('\n');
      let isThinkingChunk = false;

      // Enhanced heuristics for better Claude-like thinking detection
      const trimmedChunk = cleanChunk.trim();
      if (/^(Let me|Now let me|I'll|I need to|I should|I notice|I can|I will|Thinking:|Analyzing|Considering|Checking|Looking|Planning|First|Next|Finally)/i.test(trimmedChunk)) {
        isThinkingChunk = true;
      } else if (/^```|# |Here is|```|```|```/i.test(trimmedChunk)) {
        // If we encounter code blocks or headers, likely content not thinking
        isThinkingChunk = false;
      }

      // Update character count for speed calculation (using same variable as OpenCode path)
      totalCharsReceived += cleanChunk.length;

      // Calculate current streaming speed (chars per second)
      const elapsedSeconds = (Date.now() - streamStartTime) / 1000;
      const speed = elapsedSeconds > 0 ? Math.round(totalCharsReceived / elapsedSeconds) : 0;

      setThinkingStats(prev => ({
        ...prev,
        chars: totalCharsReceived,
        speed: speed
      }));

      const agentMatch = cleanChunk.match(/\[AGENT:\s*([^\]]+)\]/i);
      if (agentMatch) {
        setThinkingStats(prev => ({ ...prev, activeAgent: agentMatch[1].trim() }));
      }

      if (isThinkingChunk) {
        setThinkingLines(prev => [...prev, ...lines.map(l => l.trim()).filter(l => l && !/^(Let me|Now let me|I'll|I need to|I notice)/i.test(l.trim()))]);
      } else {
        setMessages(prev => {
          const last = prev[prev.length - 1];
          if (last && last.role === 'assistant') {
            return [...prev.slice(0, -1), { ...last, content: last.content + cleanChunk }];
          }
          return prev;
        });
      }
    });

if (result.success) {
  const responseText = result.response || fullResponse;

lib/context-manager.mjs (new file, 181 lines)

@@ -0,0 +1,181 @@
/**
* Context Manager - Intelligent context window management for TUI 5
* Auto-summarizes conversation history when approaching token limits
*
* Original implementation for OpenQode TUI
*/
import { getQwen } from '../qwen-oauth.mjs';
// Rough token estimation: ~4 chars per token for English
const CHARS_PER_TOKEN = 4;
/**
* ContextManager class - Manages conversation context and auto-summarization
*/
export class ContextManager {
constructor(options = {}) {
this.tokenLimit = options.tokenLimit || 100000; // Default 100K token context
this.summarizeThreshold = options.summarizeThreshold || 0.5; // Summarize at 50%
this.minMessagesToKeep = options.minMessagesToKeep || 4; // Keep last 4 messages
this.summaryBlock = null; // Stores summarized context
}
/**
* Estimate token count for text
* @param {string} text - Text to count
* @returns {number} Estimated token count
*/
countTokens(text) {
if (!text) return 0;
return Math.ceil(text.length / CHARS_PER_TOKEN);
}
/**
* Count tokens for all messages
* @param {Array} messages - Array of message objects
* @returns {number} Total estimated tokens
*/
countMessageTokens(messages) {
return messages.reduce((total, msg) => {
return total + this.countTokens(msg.content || '');
}, 0);
}
/**
* Get current context usage as percentage
* @param {Array} messages - Current messages
* @returns {number} Percentage (0-100)
*/
getUsagePercent(messages) {
const used = this.countMessageTokens(messages);
return Math.round((used / this.tokenLimit) * 100);
}
/**
* Check if summarization is needed
* @param {Array} messages - Current messages
* @returns {boolean}
*/
shouldSummarize(messages) {
const usage = this.getUsagePercent(messages) / 100;
return usage >= this.summarizeThreshold && messages.length > this.minMessagesToKeep;
}
/**
* Summarize older messages to free up context
* @param {Array} messages - All messages
* @param {Function} onProgress - Progress callback
* @returns {Object} { summary, keptMessages }
*/
async summarize(messages, onProgress = null) {
if (messages.length <= this.minMessagesToKeep) {
return { summary: null, keptMessages: messages };
}
// Split: messages to summarize vs messages to keep
const toSummarize = messages.slice(0, -this.minMessagesToKeep);
const toKeep = messages.slice(-this.minMessagesToKeep);
if (onProgress) onProgress('Summarizing context...');
// Create summary prompt
const summaryPrompt = `Summarize the following conversation history into a concise context summary.
Focus on:
- Key decisions made
- Important context established
- User preferences expressed
- Current project/task state
Keep it under 500 words.
CONVERSATION TO SUMMARIZE:
${toSummarize.map(m => `[${m.role}]: ${m.content}`).join('\n\n')}
SUMMARY:`;
try {
// Use AI to generate summary
const oauth = getQwen();
const result = await oauth.sendMessage(summaryPrompt, 'qwen-turbo');
if (result.success) {
this.summaryBlock = {
type: 'context_summary',
content: result.response,
originalMessageCount: toSummarize.length,
timestamp: new Date().toISOString()
};
return {
summary: this.summaryBlock,
keptMessages: toKeep
};
}
} catch (error) {
console.error('Context summarization failed:', error.message);
}
// Fallback: simple truncation
this.summaryBlock = {
type: 'context_truncated',
content: `[Previous ${toSummarize.length} messages truncated to save context]`,
originalMessageCount: toSummarize.length,
timestamp: new Date().toISOString()
};
return {
summary: this.summaryBlock,
keptMessages: toKeep
};
}
/**
* Get context summary as system prompt addition
*/
getSummaryContext() {
if (!this.summaryBlock) return '';
return `
=== PREVIOUS CONTEXT SUMMARY ===
The following is a summary of earlier conversation (${this.summaryBlock.originalMessageCount} messages):
${this.summaryBlock.content}
=== END SUMMARY ===
`;
}
/**
* Get stats for UI display
* @param {Array} messages - Current messages
* @returns {Object} Stats object
*/
getStats(messages) {
const tokens = this.countMessageTokens(messages);
const percent = this.getUsagePercent(messages);
const needsSummary = this.shouldSummarize(messages);
return {
tokens,
limit: this.tokenLimit,
percent,
needsSummary,
hasSummary: !!this.summaryBlock,
color: percent > 80 ? 'red' : percent > 50 ? 'yellow' : 'green'
};
}
}
// Singleton instance
let _contextManager = null;
export function getContextManager(options = {}) {
if (!_contextManager) {
_contextManager = new ContextManager(options);
}
return _contextManager;
}
export default ContextManager;
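
How the TUI consumes this module is only implied by the imports in the diff above; the following is a minimal sketch of a plausible call site, assuming the TUI keeps its history in a messages array and exposes a setStatus callback for progress text (both names are assumptions, not part of this commit):

// Hypothetical call site in the TUI send handler; only the ContextManager API below is taken from this file
import { getContextManager } from '../lib/context-manager.mjs';

async function prepareContext(messages, setStatus) {
  const ctx = getContextManager({ tokenLimit: 100000 });

  // Summarize older history once usage crosses the configured threshold
  if (ctx.shouldSummarize(messages)) {
    const { keptMessages } = await ctx.summarize(messages, (msg) => setStatus(msg));
    messages = keptMessages;
  }

  // Prepend the stored summary (if any) and expose stats for the status bar
  const summaryContext = ctx.getSummaryContext();
  const stats = ctx.getStats(messages); // { tokens, limit, percent, needsSummary, hasSummary, color }
  return { messages, summaryContext, stats };
}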

lib/debug-logger.mjs (new file, 219 lines)

@@ -0,0 +1,219 @@
/**
* Debug Logger - Comprehensive request/response logging for TUI 5
* Enabled via --debug flag or /debug command
*
* Original implementation for OpenQode TUI
*/
import { appendFile, writeFile, readFile, access } from 'fs/promises';
import { join } from 'path';
const DEBUG_FILE = '.openqode-debug.log';
/**
* DebugLogger class - Logs all API requests, responses, and tool executions
*/
export class DebugLogger {
constructor(options = {}) {
this.enabled = options.enabled || false;
this.logPath = options.logPath || join(process.cwd(), DEBUG_FILE);
this.maxLogSize = options.maxLogSize || 5 * 1024 * 1024; // 5MB max
this.sessionId = Date.now().toString(36);
}
/**
* Enable debug logging
*/
enable() {
this.enabled = true;
this.log('DEBUG', 'Debug logging enabled');
}
/**
* Disable debug logging
*/
disable() {
this.log('DEBUG', 'Debug logging disabled');
this.enabled = false;
}
/**
* Toggle debug mode
*/
toggle() {
if (this.enabled) {
this.disable();
} else {
this.enable();
}
return this.enabled;
}
/**
* Format timestamp
*/
timestamp() {
return new Date().toISOString();
}
/**
* Log a message
* @param {string} level - Log level (INFO, DEBUG, WARN, ERROR, API, TOOL)
* @param {string} message - Log message
* @param {Object} data - Optional data to log
*/
async log(level, message, data = null) {
if (!this.enabled && level !== 'DEBUG') return;
const entry = {
timestamp: this.timestamp(),
session: this.sessionId,
level,
message,
...(data && { data: this.truncate(data) })
};
const logLine = JSON.stringify(entry) + '\n';
try {
await appendFile(this.logPath, logLine);
} catch (error) {
// Silent fail - debug logging shouldn't break the app
}
}
/**
* Log API request
*/
async logRequest(provider, model, prompt, options = {}) {
await this.log('API_REQUEST', `${provider}/${model}`, {
promptLength: prompt?.length || 0,
promptPreview: prompt?.substring(0, 200) + '...',
options
});
}
/**
* Log API response
*/
async logResponse(provider, model, response, duration) {
await this.log('API_RESPONSE', `${provider}/${model}`, {
success: response?.success,
responseLength: response?.response?.length || 0,
responsePreview: response?.response?.substring(0, 200) + '...',
durationMs: duration,
usage: response?.usage
});
}
/**
* Log tool execution
*/
async logTool(toolName, input, output, duration) {
await this.log('TOOL', toolName, {
input: this.truncate(input),
output: this.truncate(output),
durationMs: duration
});
}
/**
* Log error
*/
async logError(context, error) {
await this.log('ERROR', context, {
message: error?.message,
stack: error?.stack?.substring(0, 500)
});
}
/**
* Log user command
*/
async logCommand(command, args) {
await this.log('COMMAND', command, { args });
}
/**
* Truncate large objects for logging
*/
truncate(obj, maxLength = 1000) {
if (!obj) return obj;
if (typeof obj === 'string') {
return obj.length > maxLength
? obj.substring(0, maxLength) + '...[truncated]'
: obj;
}
try {
const str = JSON.stringify(obj);
if (str.length > maxLength) {
// Re-parsing a truncated JSON string would almost always throw, so return a string preview instead
return str.substring(0, maxLength) + '...[truncated]';
}
return obj;
} catch {
return '[Object]';
}
}
/**
* Clear log file
*/
async clear() {
try {
await writeFile(this.logPath, '');
await this.log('DEBUG', 'Log file cleared');
} catch (error) {
// Ignore
}
}
/**
* Get recent log entries
* @param {number} count - Number of entries to return
*/
async getRecent(count = 50) {
try {
const content = await readFile(this.logPath, 'utf8');
const lines = content.trim().split('\n').filter(l => l);
return lines.slice(-count).map(l => {
try {
return JSON.parse(l);
} catch {
return { raw: l };
}
});
} catch {
return [];
}
}
/**
* Get log file path
*/
getPath() {
return this.logPath;
}
}
// Singleton instance
let _logger = null;
export function getDebugLogger(options = {}) {
if (!_logger) {
_logger = new DebugLogger(options);
}
return _logger;
}
// Check CLI args for --debug flag
export function initFromArgs() {
const logger = getDebugLogger();
if (process.argv.includes('--debug')) {
logger.enable();
}
return logger;
}
export default DebugLogger;
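
The TUI diff above initializes the logger from CLI args but does not show the call-site wiring; a minimal sketch of how a provider call could be wrapped, assuming a generic callFn (hypothetical) that performs the actual request:

// Hypothetical wrapper around a provider call; only the logger API below is taken from this file
import { getDebugLogger } from '../lib/debug-logger.mjs';

async function loggedCall(provider, model, prompt, callFn) {
  const logger = getDebugLogger();
  await logger.logRequest(provider, model, prompt);

  const started = Date.now();
  try {
    const result = await callFn(prompt);
    await logger.logResponse(provider, model, result, Date.now() - started);
    return result;
  } catch (error) {
    await logger.logError(`${provider}/${model}`, error);
    throw error;
  }
}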

lib/session-memory.mjs (new file, 176 lines)

@@ -0,0 +1,176 @@
/**
* Session Memory - Persistent context storage for TUI 5
* Allows AI to remember important facts across sessions
*
* Original implementation for OpenQode TUI
*/
import { readFile, writeFile, access } from 'fs/promises';
import { join } from 'path';
import { homedir } from 'os';
const MEMORY_FILE = '.openqode-memory.json';
/**
* SessionMemory class - Manages persistent facts/context across TUI sessions
*/
export class SessionMemory {
constructor(projectPath = null) {
this.projectPath = projectPath || process.cwd();
this.memoryPath = join(this.projectPath, MEMORY_FILE);
this.facts = [];
this.metadata = {
created: null,
lastModified: null,
version: '1.0'
};
}
/**
* Load memory from disk
*/
async load() {
try {
await access(this.memoryPath);
const data = await readFile(this.memoryPath, 'utf8');
const parsed = JSON.parse(data);
this.facts = parsed.facts || [];
this.metadata = parsed.metadata || this.metadata;
return true;
} catch (error) {
// No memory file exists yet - that's OK
this.facts = [];
this.metadata.created = new Date().toISOString();
return false;
}
}
/**
* Save memory to disk
*/
async save() {
this.metadata.lastModified = new Date().toISOString();
if (!this.metadata.created) {
this.metadata.created = this.metadata.lastModified;
}
const data = {
version: '1.0',
metadata: this.metadata,
facts: this.facts
};
await writeFile(this.memoryPath, JSON.stringify(data, null, 2), 'utf8');
return true;
}
/**
* Remember a new fact
* @param {string} fact - The fact to remember
* @param {string} category - Optional category (context, decision, preference, etc.)
*/
async remember(fact, category = 'context') {
const entry = {
id: Date.now(),
fact: fact.trim(),
category,
timestamp: new Date().toISOString()
};
this.facts.push(entry);
await this.save();
return entry;
}
/**
* Forget a fact by ID or index
* @param {number} identifier - Fact ID or index (1-based for user convenience)
*/
async forget(identifier) {
// Try by index first (1-based)
if (identifier > 0 && identifier <= this.facts.length) {
const removed = this.facts.splice(identifier - 1, 1)[0];
await this.save();
return removed;
}
// Try by ID
const index = this.facts.findIndex(f => f.id === identifier);
if (index !== -1) {
const removed = this.facts.splice(index, 1)[0];
await this.save();
return removed;
}
return null;
}
/**
* Clear all memory
*/
async clear() {
this.facts = [];
await this.save();
return true;
}
/**
* Get all facts as a formatted string for AI context
*/
getContextString() {
if (this.facts.length === 0) {
return '';
}
const header = '=== SESSION MEMORY ===\nThe following facts were remembered from previous sessions:\n';
const factsList = this.facts.map((f, i) =>
`${i + 1}. [${f.category}] ${f.fact}`
).join('\n');
return header + factsList + '\n=== END MEMORY ===\n\n';
}
/**
* Get facts formatted for display in UI
*/
getDisplayList() {
return this.facts.map((f, i) => ({
index: i + 1,
...f,
displayDate: new Date(f.timestamp).toLocaleDateString()
}));
}
/**
* Get memory summary for welcome screen
*/
getSummary() {
const count = this.facts.length;
if (count === 0) {
return 'No session memory stored';
}
const categories = {};
this.facts.forEach(f => {
categories[f.category] = (categories[f.category] || 0) + 1;
});
const breakdown = Object.entries(categories)
.map(([cat, num]) => `${num} ${cat}`)
.join(', ');
return `${count} facts remembered (${breakdown})`;
}
}
// Singleton instance for easy import
let _memoryInstance = null;
export function getSessionMemory(projectPath = null) {
if (!_memoryInstance) {
_memoryInstance = new SessionMemory(projectPath);
}
return _memoryInstance;
}
export default SessionMemory;
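
A minimal sketch of the intended flow, assuming the TUI loads memory at startup and prepends remembered facts to its system prompt (the basePrompt name is a placeholder, not from this commit):

// Hypothetical startup/request flow; method names match this file
import { getSessionMemory } from '../lib/session-memory.mjs';

const memory = getSessionMemory();
await memory.load(); // returns false on first run, which is fine

await memory.remember('User prefers TypeScript output', 'preference');

const basePrompt = '...'; // placeholder for the TUI system prompt
const systemPrompt = memory.getContextString() + basePrompt;

const rows = memory.getDisplayList(); // [{ index, fact, category, displayDate, ... }] for the UI
const banner = memory.getSummary();   // e.g. "3 facts remembered (2 preference, 1 context)"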

lib/skills.mjs (new file, 251 lines)

@@ -0,0 +1,251 @@
/**
* Skills Library - Pre-built AI prompts for common tasks
* Provides /skills and /skill <name> commands
*
* Original implementation for OpenQode TUI
*/
/**
* Skill definition structure
*/
const SKILLS = {
// Development Skills
test: {
name: 'Unit Tests',
description: 'Generate comprehensive unit tests for code',
category: 'development',
prompt: `Generate comprehensive unit tests for the provided code.
Include:
- Edge cases and boundary conditions
- Error handling scenarios
- Mock dependencies where appropriate
- Clear test descriptions
- Setup and teardown if needed
Format: Use the appropriate testing framework for the language (Jest, pytest, etc.)`
},
refactor: {
name: 'Refactor Code',
description: 'Suggest refactoring improvements',
category: 'development',
prompt: `Analyze the provided code and suggest refactoring improvements.
Focus on:
- Code clarity and readability
- DRY principle violations
- Performance optimizations
- Design pattern opportunities
- Type safety improvements
Provide before/after examples for each suggestion.`
},
review: {
name: 'Code Review',
description: 'Perform a thorough code review',
category: 'development',
prompt: `Perform a thorough code review of the provided code.
Check for:
- Bugs and logic errors
- Security vulnerabilities
- Performance issues
- Code style and consistency
- Documentation gaps
- Error handling
Rate severity: 🔴 Critical | 🟡 Warning | 🟢 Suggestion`
},
debug: {
name: 'Debug Helper',
description: 'Help diagnose and fix bugs',
category: 'development',
prompt: `Help debug the provided code/error.
Approach:
1. Identify the root cause
2. Explain why the error occurs
3. Provide the fix with explanation
4. Suggest prevention strategies
Include stack trace analysis if provided.`
},
// Documentation Skills
docs: {
name: 'Documentation',
description: 'Generate comprehensive documentation',
category: 'documentation',
prompt: `Generate comprehensive documentation for the provided code.
Include:
- Overview/purpose
- Installation/setup (if applicable)
- API reference with parameters and return values
- Usage examples
- Configuration options
- Common issues/FAQ
Format: Markdown with proper headings.`
},
readme: {
name: 'README Generator',
description: 'Create a professional README.md',
category: 'documentation',
prompt: `Create a professional README.md for this project.
Include:
- Project title and badges
- Description
- Features list
- Quick start guide
- Installation steps
- Usage examples
- Configuration
- Contributing guidelines
- License
Make it visually appealing with emojis and formatting.`
},
// Analysis Skills
explain: {
name: 'Code Explainer',
description: 'Explain code in simple terms',
category: 'analysis',
prompt: `Explain the provided code in simple, clear terms.
Structure:
1. High-level purpose (what it does)
2. Step-by-step walkthrough
3. Key concepts used
4. How it fits in larger context
Use analogies where helpful. Suitable for juniors.`
},
security: {
name: 'Security Audit',
description: 'Check for security vulnerabilities',
category: 'analysis',
prompt: `Perform a security audit of the provided code.
Check for:
- Injection vulnerabilities (SQL, XSS, etc.)
- Authentication/authorization issues
- Sensitive data exposure
- Insecure dependencies
- Cryptographic weaknesses
- OWASP Top 10 issues
Severity: 🔴 Critical | 🟠 High | 🟡 Medium | 🟢 Low`
},
// Generation Skills
api: {
name: 'API Design',
description: 'Design REST API endpoints',
category: 'generation',
prompt: `Design REST API endpoints for the described functionality.
Include:
- Endpoint paths and methods
- Request/response schemas (JSON)
- Status codes
- Authentication requirements
- Rate limiting suggestions
- OpenAPI/Swagger format if helpful`
},
schema: {
name: 'Database Schema',
description: 'Design database schema',
category: 'generation',
prompt: `Design a database schema for the described requirements.
Include:
- Tables and columns with types
- Primary/foreign keys
- Indexes for performance
- Relationships diagram (text-based)
- Migration script if helpful
Consider normalization and query patterns.`
}
};
/**
* Get all available skills
*/
export function getAllSkills() {
return Object.entries(SKILLS).map(([id, skill]) => ({
id,
...skill
}));
}
/**
* Get skills grouped by category
*/
export function getSkillsByCategory() {
const categories = {};
Object.entries(SKILLS).forEach(([id, skill]) => {
if (!categories[skill.category]) {
categories[skill.category] = [];
}
categories[skill.category].push({ id, ...skill });
});
return categories;
}
/**
* Get a specific skill by ID
*/
export function getSkill(skillId) {
return SKILLS[skillId] ? { id: skillId, ...SKILLS[skillId] } : null;
}
/**
* Execute a skill - returns the prompt to inject
* @param {string} skillId - Skill ID
* @param {string} userInput - User's additional input/code
*/
export function executeSkill(skillId, userInput = '') {
const skill = getSkill(skillId);
if (!skill) return null;
const fullPrompt = `[SKILL: ${skill.name}]
${skill.prompt}
USER INPUT/CODE:
${userInput}
Please proceed with the ${skill.name.toLowerCase()} task.`;
return {
skill,
prompt: fullPrompt
};
}
/**
* Get formatted skill list for display
*/
export function getSkillListDisplay() {
const categories = getSkillsByCategory();
let output = '';
for (const [category, skills] of Object.entries(categories)) {
output += `\n📁 ${category.toUpperCase()}\n`;
skills.forEach(skill => {
output += ` /skill ${skill.id.padEnd(10)} - ${skill.description}\n`;
});
}
return output;
}
export default {
getAllSkills,
getSkillsByCategory,
getSkill,
executeSkill,
getSkillListDisplay
};