QwenClaw v2.0 - Complete Rebuild with ALL 81+ Skills

This commit is contained in:
AI Agent
2026-02-26 20:08:00 +04:00
Unverified
parent 7e297c53b9
commit 69cf7e8a05
475 changed files with 82593 additions and 110 deletions

View File

@@ -0,0 +1,267 @@
{
"name": "claude-settings",
"owner": {
"name": "Fatih Akyon"
},
"metadata": {
"description": "Claude Code plugins featuring skills, slash commands, autonomous subagents, hooks, and MCP server integrations for Git workflow, code review, and plugin development.",
"version": "2.1.0"
},
"plugins": [
{
"name": "ultralytics-dev",
"source": "./plugins/ultralytics-dev",
"description": "Auto-formatting hooks for Python, JavaScript, Markdown, and Bash with Google-style docstrings and code quality checks.",
"version": "2.1.1",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["ultralytics", "formatting", "hooks", "python", "code-quality", "docstrings"],
"category": "productivity",
"tags": ["formatting", "development"]
},
{
"name": "slack-tools",
"source": "./plugins/slack-tools",
"description": "Slack MCP integration for message search and channel operations with best practices skill.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["slack", "mcp", "messaging", "search"],
"category": "tools",
"tags": ["slack", "mcp", "integration"]
},
{
"name": "statusline-tools",
"source": "./plugins/statusline-tools",
"description": "Cross-platform statusline showing session context, cost, and account-wide 5H usage with time until reset.",
"version": "1.0.1",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["statusline", "usage", "context", "cost", "monitoring"],
"category": "productivity",
"tags": ["statusline", "monitoring"]
},
{
"name": "mongodb-tools",
"source": "./plugins/mongodb-tools",
"description": "MongoDB MCP integration (read-only) for database exploration with best practices skill.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["mongodb", "mcp", "database", "nosql"],
"category": "tools",
"tags": ["mongodb", "mcp", "integration"]
},
{
"name": "gcloud-tools",
"source": "./plugins/gcloud-tools",
"description": "Google Cloud Observability MCP for logs, metrics, and traces with best practices skill.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["gcloud", "mcp", "observability", "logging", "metrics"],
"category": "tools",
"tags": ["gcloud", "mcp", "integration"]
},
{
"name": "linear-tools",
"source": "./plugins/linear-tools",
"description": "Linear MCP integration for issue tracking with workflow best practices skill.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["linear", "mcp", "issues", "project-management"],
"category": "tools",
"tags": ["linear", "mcp", "integration"]
},
{
"name": "playwright-tools",
"source": "./plugins/playwright-tools",
"description": "Playwright browser automation with E2E testing skill and responsive design testing agent.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["playwright", "testing", "e2e", "automation", "responsive", "viewport", "mobile"],
"category": "development",
"tags": ["testing", "e2e", "automation"]
},
{
"name": "github-dev",
"source": "./plugins/github-dev",
"description": "GitHub and Git workflow tools: commit-creator, pr-creator, and pr-reviewer agents, slash commands for commits and PRs, GitHub MCP integration, plus skills for PR/commit workflows.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["git", "commit", "pull-request", "github", "workflow", "agents", "commands"],
"category": "development",
"tags": ["git", "workflow", "automation"]
},
{
"name": "tavily-tools",
"source": "./plugins/tavily-tools",
"description": "Tavily web search and content extraction MCP with hooks and skills for optimal tool selection.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["web-search", "tavily", "search", "content-extraction", "mcp"],
"category": "tools",
"tags": ["search", "mcp", "integration"]
},
{
"name": "paper-search-tools",
"source": "./plugins/paper-search-tools",
"description": "Academic paper search MCP for arXiv, PubMed, IEEE, Scopus, ACM, and more. Requires Docker.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["paper-search", "arxiv", "pubmed", "ieee", "academic", "research", "mcp"],
"category": "tools",
"tags": ["research", "mcp", "integration"]
},
{
"name": "supabase-tools",
"source": "./plugins/supabase-tools",
"description": "Official Supabase MCP for database management with OAuth authentication.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["supabase", "database", "postgres", "oauth", "mcp"],
"category": "tools",
"tags": ["database", "mcp", "integration"]
},
{
"name": "notification-tools",
"source": "./plugins/notification-tools",
"description": "Desktop notifications when Claude Code completes tasks. Supports macOS and Linux.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["notifications", "desktop", "alerts", "macos", "linux"],
"category": "productivity",
"tags": ["notifications", "alerts"]
},
{
"name": "general-dev",
"source": "./plugins/general-dev",
"description": "General development tools: code-simplifier agent for pattern analysis, rg preference hook.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["code-patterns", "simplification", "architecture", "analysis", "code-quality"],
"category": "development",
"tags": ["analysis", "patterns", "quality"]
},
{
"name": "plugin-dev",
"source": "./plugins/plugin-dev",
"description": "Toolkit for developing Claude Code plugins. Includes 7 expert skills covering hooks, MCP integration, commands, agents, and best practices. AI-assisted plugin creation and validation.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugin-dev",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["plugin", "development", "claude", "skills", "hooks", "mcp", "commands", "agents", "best-practices"],
"category": "development",
"tags": ["plugin", "development", "claude"]
},
{
"name": "azure-tools",
"source": "./plugins/azure-tools",
"description": "Azure MCP Server integration for 40+ Azure services with Azure CLI authentication.",
"version": "2.0.2",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["azure", "mcp", "cloud", "storage", "keyvault", "cosmos", "aks"],
"category": "tools",
"tags": ["azure", "mcp", "integration"]
},
{
"name": "ccproxy-tools",
"source": "./plugins/ccproxy-tools",
"description": "Use Claude Code with your GitHub Copilot credits, Gemini API, local ollama models or any LLM.",
"version": "2.0.3",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["ccproxy", "gemini", "proxy", "copilot", "llm", "configuration"],
"category": "tools",
"tags": ["proxy", "llm", "configuration"]
},
{
"name": "claude-tools",
"source": "./plugins/claude-tools",
"description": "Commands for syncing CLAUDE.md, permissions allowlist, and refreshing context from CLAUDE.md files.",
"version": "2.0.4",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0",
"keywords": ["claude", "settings", "sync", "config", "allowlist", "context"],
"category": "productivity",
"tags": ["settings", "sync", "config"]
}
]
}

View File

@@ -0,0 +1,128 @@
{
"$schema": "https://json.schemastore.org/claude-code-settings.json",
"env": {
"ANTHROPIC_AUTH_TOKEN": "your_zai_api_key",
"ANTHROPIC_BASE_URL": "https://api.z.ai/api/anthropic",
"API_TIMEOUT_MS": "3000000",
"CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR": "1",
"DISABLE_BUG_COMMAND": "1",
"DISABLE_ERROR_REPORTING": "1",
"DISABLE_TELEMETRY": "1",
"ANTHROPIC_DEFAULT_OPUS_MODEL": "GLM-5",
"ANTHROPIC_DEFAULT_SONNET_MODEL": "GLM-5",
"ANTHROPIC_DEFAULT_HAIKU_MODEL": "GLM-4.7-Flash",
"MAX_MCP_OUTPUT_TOKENS": "40000"
},
"includeCoAuthoredBy": false,
"permissions": {
"allow": [
"Bash(find:*)",
"Bash(rg:*)",
"Bash(echo:*)",
"Bash(grep:*)",
"Bash(ls:*)",
"Bash(wc:*)",
"Bash(cat:*)",
"Bash(sed:*)",
"Bash(tree:*)",
"Bash(tail:*)",
"Bash(pgrep:*)",
"Bash(ps:*)",
"Bash(sort:*)",
"Bash(dmesg:*)",
"Bash(done)",
"Bash(ruff:*)",
"Bash(nvidia-smi:*)",
"Bash(pdflatex:*)",
"Bash(biber:*)",
"Bash(tmux ls:*)",
"Bash(tmux capture-pane:*)",
"Bash(tmux list-sessions:*)",
"Bash(tmux list-windows:*)",
"Bash(gh pr list:*)",
"Bash(gh pr view:*)",
"Bash(gh pr diff:*)",
"Bash(gh api user:*)",
"Bash(gh repo view:*)",
"Bash(gh issue view:*)",
"Bash(gh search:*)",
"Bash(git branch --show-current:*)",
"Bash(git diff:*)",
"Bash(git status:*)",
"Bash(git rev-parse:*)",
"Bash(git push:*)",
"Bash(git log:*)",
"Bash(git -C :* branch --show-current:*)",
"Bash(git -C :* diff:*)",
"Bash(git -C :* status:*)",
"Bash(git -C :* rev-parse:*)",
"Bash(git -C :* push:*)",
"Bash(git -C :* log:*)",
"Bash(git fetch --prune:*)",
"Bash(git worktree list:*)",
"Bash(uv run ruff:*)",
"Bash(python --version:*)",
"WebSearch",
"WebFetch(domain:openai.com)",
"WebFetch(domain:anthropic.com)",
"WebFetch(domain:docs.anthropic.com)",
"WebFetch(domain:ai.google.dev)",
"WebFetch(domain:github.com)",
"WebFetch(domain:gradio.app)",
"WebFetch(domain:arxiv.org)",
"WebFetch(domain:dl.acm.org)",
"WebFetch(domain:openaccess.thecvf.com)",
"WebFetch(domain:www.semanticscholar.org)",
"WebFetch(domain:openreview.net)",
"WebFetch(domain:doi.org)",
"WebFetch(domain:link.springer.com)",
"WebFetch(domain:pypi.org)",
"WebFetch(domain:docs.ultralytics.com)",
"WebFetch(domain:sli.dev)",
"WebFetch(domain:docs.vllm.ai)",
"WebFetch(domain:developer.themoviedb.org)",
"mcp__tavily__tavily_extract",
"mcp__tavily__tavily_search",
"mcp__context7__resolve-library-id",
"mcp__context7__get-library-docs",
"mcp__github__get_me",
"mcp__github__pull_request_read",
"mcp__github__get_file_contents",
"mcp__github__get_workflow_run",
"mcp__github__get_job_logs",
"mcp__github__get_pull_request_comments",
"mcp__github__get_pull_request_reviews",
"mcp__github__issue_read",
"mcp__github__list_pull_requests",
"mcp__github__list_commits",
"mcp__github__list_workflows",
"mcp__github__list_workflow_runs",
"mcp__github__list_workflow_jobs",
"mcp__github__search_pull_requests",
"mcp__github__search_issues",
"mcp__github__search_code",
"mcp__wandb__query_wandb_tool",
"mcp__wandb__query_wandb_entity_projects",
"mcp__mongodb__list_databases",
"mcp__mongodb__list_collections",
"mcp__mongodb__get_collection_schema",
"mcp__mongodb__collection-indexes",
"mcp__mongodb__db-stats",
"mcp__mongodb__count",
"mcp__supabase__list_tables",
"mcp__gcloud-observability__list_log_entries"
]
},
"outputStyle": "Explanatory",
"model": "opus",
"extraKnownMarketplaces": {
"claude-settings": {
"source": {
"source": "github",
"repo": "fcakyon/claude-codex-settings"
}
}
},
"spinnerTipsEnabled": false,
"alwaysThinkingEnabled": true
}

View File

@@ -0,0 +1,130 @@
{
"$schema": "https://json.schemastore.org/claude-code-settings.json",
"env": {
"CLAUDE_BASH_MAINTAIN_PROJECT_WORKING_DIR": "1",
"CLAUDE_CODE_DISABLE_FEEDBACK_SURVEY": "1",
"DISABLE_BUG_COMMAND": "1",
"DISABLE_ERROR_REPORTING": "1",
"DISABLE_TELEMETRY": "1",
"ANTHROPIC_DEFAULT_OPUS_MODEL": "claude-opus-4-6",
"ANTHROPIC_DEFAULT_SONNET_MODEL": "claude-opus-4-6",
"ANTHROPIC_DEFAULT_HAIKU_MODEL": "claude-sonnet-4-6",
"CLAUDE_CODE_SUBAGENT_MODEL": "claude-opus-4-6",
"MAX_MCP_OUTPUT_TOKENS": "40000"
},
"attribution": {
"commit": "",
"pr": ""
},
"permissions": {
"allow": [
"Bash(find:*)",
"Bash(rg:*)",
"Bash(echo:*)",
"Bash(grep:*)",
"Bash(ls:*)",
"Bash(wc:*)",
"Bash(cat:*)",
"Bash(sed:*)",
"Bash(tree:*)",
"Bash(tail:*)",
"Bash(pgrep:*)",
"Bash(ps:*)",
"Bash(sort:*)",
"Bash(dmesg:*)",
"Bash(done)",
"Bash(ruff:*)",
"Bash(nvidia-smi:*)",
"Bash(pdflatex:*)",
"Bash(biber:*)",
"Bash(tmux ls:*)",
"Bash(tmux capture-pane:*)",
"Bash(tmux list-sessions:*)",
"Bash(tmux list-windows:*)",
"Bash(gh pr list:*)",
"Bash(gh pr view:*)",
"Bash(gh pr diff:*)",
"Bash(gh api user:*)",
"Bash(gh repo view:*)",
"Bash(gh issue view:*)",
"Bash(gh search:*)",
"Bash(git branch --show-current:*)",
"Bash(git diff:*)",
"Bash(git status:*)",
"Bash(git rev-parse:*)",
"Bash(git push:*)",
"Bash(git log:*)",
"Bash(git -C :* branch --show-current:*)",
"Bash(git -C :* diff:*)",
"Bash(git -C :* status:*)",
"Bash(git -C :* rev-parse:*)",
"Bash(git -C :* push:*)",
"Bash(git -C :* log:*)",
"Bash(git fetch --prune:*)",
"Bash(git worktree list:*)",
"Bash(uv run ruff:*)",
"Bash(python --version:*)",
"WebSearch",
"WebFetch(domain:openai.com)",
"WebFetch(domain:anthropic.com)",
"WebFetch(domain:docs.anthropic.com)",
"WebFetch(domain:ai.google.dev)",
"WebFetch(domain:github.com)",
"WebFetch(domain:gradio.app)",
"WebFetch(domain:arxiv.org)",
"WebFetch(domain:dl.acm.org)",
"WebFetch(domain:openaccess.thecvf.com)",
"WebFetch(domain:www.semanticscholar.org)",
"WebFetch(domain:openreview.net)",
"WebFetch(domain:doi.org)",
"WebFetch(domain:link.springer.com)",
"WebFetch(domain:pypi.org)",
"WebFetch(domain:docs.ultralytics.com)",
"WebFetch(domain:sli.dev)",
"WebFetch(domain:docs.vllm.ai)",
"WebFetch(domain:developer.themoviedb.org)",
"mcp__tavily__tavily_extract",
"mcp__tavily__tavily_search",
"mcp__context7__resolve-library-id",
"mcp__context7__get-library-docs",
"mcp__github__get_me",
"mcp__github__pull_request_read",
"mcp__github__get_file_contents",
"mcp__github__get_workflow_run",
"mcp__github__get_job_logs",
"mcp__github__get_pull_request_comments",
"mcp__github__get_pull_request_reviews",
"mcp__github__issue_read",
"mcp__github__list_pull_requests",
"mcp__github__list_commits",
"mcp__github__list_workflows",
"mcp__github__list_workflow_runs",
"mcp__github__list_workflow_jobs",
"mcp__github__search_pull_requests",
"mcp__github__search_issues",
"mcp__github__search_code",
"mcp__wandb__query_wandb_tool",
"mcp__wandb__query_wandb_entity_projects",
"mcp__mongodb__list_databases",
"mcp__mongodb__list_collections",
"mcp__mongodb__get_collection_schema",
"mcp__mongodb__collection-indexes",
"mcp__mongodb__db-stats",
"mcp__mongodb__count",
"mcp__supabase__list_tables",
"mcp__gcloud-observability__list_log_entries"
]
},
"outputStyle": "Explanatory",
"model": "opus",
"extraKnownMarketplaces": {
"claude-settings": {
"source": {
"source": "github",
"repo": "fcakyon/claude-codex-settings"
}
}
},
"spinnerTipsEnabled": false,
"alwaysThinkingEnabled": true
}

View File

@@ -0,0 +1,51 @@
# Codex CLI configuration template.
# Every "..." / YOUR-* value below is a placeholder — substitute real
# credentials locally and never commit actual secrets to version control.

model = "gpt-5-codex"
model_reasoning_effort = "high"
model_provider = "azure"

# Streamable HTTP requires the experimental rmcp client
experimental_use_rmcp_client = true
# Commands outside the trusted set require explicit approval.
approval_policy = "untrusted"

# Azure OpenAI provider. env_key is a placeholder — presumably it names the
# environment variable holding the API key; confirm against Codex docs.
[model_providers.azure]
name = "Azure OpenAI"
base_url = "https://YOUR-AZURE-OPENAI.openai.azure.com/openai/v1"
env_key = "..."
wire_api = "responses"

[mcp_servers.azure]
command = "npx"
args = ["-y", "@azure/mcp@latest", "server", "start"]

[mcp_servers.context7]
command = "npx"
args = ["-y", "@upstash/context7-mcp"]

# GitHub MCP server runs in Docker; the PAT is forwarded via the env table.
[mcp_servers.github]
command = "docker"
args = ["run", "-i", "--rm", "-e", "GITHUB_PERSONAL_ACCESS_TOKEN", "ghcr.io/github/github-mcp-server"]
env = {"GITHUB_PERSONAL_ACCESS_TOKEN" = "ghp_..."}

[mcp_servers.playwright]
command = "npx"
args = ["@playwright/mcp@latest"]

# Slack MCP server is installed from the GitHub Packages npm registry, hence
# the NPM_CONFIG_* auth entries alongside the Slack tokens.
[mcp_servers.slack]
command = "npx"
args = ["-y", "@ubie-oss/slack-mcp-server@0.1.3"]
env = {"NPM_CONFIG_//npm.pkg.github.com/:_authToken" = "...", "NPM_CONFIG_@ubie-oss:registry" = "https://npm.pkg.github.com/", "SLACK_BOT_TOKEN" = "xoxb-...", "SLACK_USER_TOKEN" = "xoxp-..."}

[mcp_servers.tavily]
command = "npx"
args = ["-y", "tavily-mcp@latest"]
env = {"TAVILY_API_KEY" = "tvly-..."}

# Local MongoDB, read-only.
[mcp_servers.mongodb]
command = "npx"
args = ["-y", "mongodb-mcp-server", "--connectionString", "mongodb://localhost:27017/myDatabase", "--readOnly"]

# Supabase is reached over HTTP through mcp-remote (read-only, database feature only).
[mcp_servers.supabase]
command = "npx"
args = ["-y", "mcp-remote", "https://mcp.supabase.com/mcp?project_ref=YOUR-PROJECT-ID&read_only=true&features=database"]

[mcp_servers."paper-search"]
command = "docker"
args = ["run", "-i", "--rm", "mcp/paper-search"]

View File

@@ -0,0 +1,368 @@
#!/usr/bin/env python3
"""Validate all Claude Code plugins conform to specs."""
from __future__ import annotations
import json
import re
import sys
from pathlib import Path
import yaml
def parse_frontmatter(content: str) -> tuple[dict | None, str]:
"""Parse YAML frontmatter from markdown content."""
if not content.startswith("---"):
return None, content
parts = content.split("---", 2)
if len(parts) < 3:
return None, content
try:
frontmatter = yaml.safe_load(parts[1])
return frontmatter, parts[2].strip()
except yaml.YAMLError:
return None, content
def validate_plugin_json(plugin_dir: Path) -> list[str]:
    """Check that the plugin ships a well-formed .claude-plugin/plugin.json.

    The manifest must exist, parse as JSON, declare a 'name' field, and that
    name must match the plugin's directory name.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List of human-readable error strings (empty when valid).
    """
    manifest = plugin_dir / ".claude-plugin" / "plugin.json"
    if not manifest.exists():
        return [f"{plugin_dir.name}: Missing .claude-plugin/plugin.json"]
    problems: list[str] = []
    try:
        config = json.loads(manifest.read_text())
    except json.JSONDecodeError as e:
        problems.append(f"{plugin_dir.name}: Invalid plugin.json - {e}")
        return problems
    if "name" not in config:
        problems.append(f"{plugin_dir.name}: plugin.json missing 'name' field")
    elif config["name"] != plugin_dir.name:
        problems.append(f"{plugin_dir.name}: plugin.json name '{config['name']}' doesn't match directory name")
    return problems
def validate_skills(plugin_dir: Path) -> list[str]:
    """Check every skill under <plugin>/skills for spec compliance.

    Each skill is a kebab-case directory containing a SKILL.md whose YAML
    frontmatter declares a kebab-case 'name' (<= 64 chars) and a string
    'description' (<= 600 chars), followed by a non-trivial body.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List of human-readable error strings (empty when valid).
    """
    skills_dir = plugin_dir / "skills"
    problems: list[str] = []
    if not skills_dir.exists():
        # Skills are optional.
        return problems
    for entry in skills_dir.iterdir():
        if not entry.is_dir():
            continue
        label = f"{plugin_dir.name}/skills/{entry.name}"
        # Directory names must be kebab-case.
        if re.match(r"^[a-z0-9-]+$", entry.name) is None:
            problems.append(f"{label}: Directory must be kebab-case")
        manifest = entry / "SKILL.md"
        if not manifest.exists():
            problems.append(f"{label}: Missing SKILL.md")
            continue
        meta, body = parse_frontmatter(manifest.read_text())
        if not meta:
            problems.append(f"{label}/SKILL.md: Missing YAML frontmatter")
            continue
        # 'name': required string, <= 64 chars, kebab-case.
        if "name" not in meta:
            problems.append(f"{label}/SKILL.md: Missing 'name' field")
        else:
            name = meta["name"]
            if not isinstance(name, str):
                problems.append(f"{label}/SKILL.md: 'name' must be string")
            elif len(name) > 64:
                problems.append(f"{label}/SKILL.md: 'name' exceeds 64 chars ({len(name)})")
            elif re.match(r"^[a-z0-9]+(-[a-z0-9]+)*$", name) is None:
                problems.append(f"{label}/SKILL.md: 'name' must be kebab-case: '{name}'")
        # 'description': required string, <= 600 chars.
        if "description" not in meta:
            problems.append(f"{label}/SKILL.md: Missing 'description' field")
        else:
            desc = meta["description"]
            if not isinstance(desc, str):
                problems.append(f"{label}/SKILL.md: 'description' must be string")
            elif len(desc) > 600:
                problems.append(f"{label}/SKILL.md: 'description' exceeds 600 chars ({len(desc)})")
        # A skill needs an actual instruction body, not just frontmatter.
        if not body or len(body.strip()) < 20:
            problems.append(f"{label}/SKILL.md: Body content too short")
    return problems
def validate_agents(plugin_dir: Path) -> list[str]:
    """Check every agent markdown file under <plugin>/agents.

    Each agent is a kebab-case ``.md`` file whose YAML frontmatter declares
    'name' (3-50 chars, lowercase/hyphens), 'description' (10-5000 chars),
    'model' and 'color' from fixed sets, an optional 'tools' array, and a
    system-prompt body between 20 and 10000 characters.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List of human-readable error strings (empty when valid).
    """
    agents_dir = plugin_dir / "agents"
    problems: list[str] = []
    if not agents_dir.exists():
        # Agents are optional.
        return problems
    valid_models = {"inherit", "sonnet", "opus", "haiku"}
    valid_colors = {"blue", "cyan", "green", "yellow", "magenta", "red"}
    for path in agents_dir.iterdir():
        # Only regular .md files define agents; everything else is ignored.
        if not path.is_file() or path.suffix != ".md":
            continue
        label = f"{plugin_dir.name}/agents/{path.name}"
        if re.match(r"^[a-z0-9-]+$", path.stem) is None:
            problems.append(f"{label}: Filename must be kebab-case")
        meta, body = parse_frontmatter(path.read_text())
        if not meta:
            problems.append(f"{label}: Missing YAML frontmatter")
            continue
        # 'name': required string, 3-50 chars, lowercase with hyphens,
        # starting and ending alphanumeric (single char also allowed).
        if "name" not in meta:
            problems.append(f"{label}: Missing 'name' field")
        else:
            agent_name = meta["name"]
            if not isinstance(agent_name, str):
                problems.append(f"{label}: 'name' must be string")
            elif len(agent_name) < 3 or len(agent_name) > 50:
                problems.append(f"{label}: 'name' must be 3-50 chars ({len(agent_name)})")
            elif re.match(r"^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$", agent_name) is None:
                problems.append(
                    f"{label}: 'name' must be lowercase with hyphens, start/end alphanumeric: '{agent_name}'"
                )
        # 'description': required string, 10-5000 chars.
        if "description" not in meta:
            problems.append(f"{label}: Missing 'description' field")
        else:
            summary = meta["description"]
            if not isinstance(summary, str):
                problems.append(f"{label}: 'description' must be string")
            elif len(summary) < 10 or len(summary) > 5000:
                problems.append(f"{label}: 'description' must be 10-5000 chars ({len(summary)})")
        # 'model' and 'color' are required and must come from the fixed sets.
        if "model" not in meta:
            problems.append(f"{label}: Missing 'model' field")
        elif meta["model"] not in valid_models:
            problems.append(f"{label}: 'model' must be one of {valid_models}: '{meta['model']}'")
        if "color" not in meta:
            problems.append(f"{label}: Missing 'color' field")
        elif meta["color"] not in valid_colors:
            problems.append(f"{label}: 'color' must be one of {valid_colors}: '{meta['color']}'")
        # 'tools' is optional but must be an array when present.
        if "tools" in meta and not isinstance(meta["tools"], list):
            problems.append(f"{label}: 'tools' must be array")
        # System prompt (body) length bounds.
        stripped = body.strip() if body else ""
        if len(stripped) < 20:
            problems.append(f"{label}: System prompt too short (<20 chars)")
        elif len(stripped) > 10000:
            problems.append(f"{label}: System prompt too long (>10000 chars)")
    return problems
def validate_commands(plugin_dir: Path) -> list[str]:
    """Check slash-command markdown files under <plugin>/commands.

    Command filenames must be kebab-case. Frontmatter is optional; when
    present, 'model' must come from the allowed set and
    'disable-model-invocation' must be a boolean. Files with no frontmatter
    must have a non-blank body; frontmatter-only commands are allowed.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List of human-readable error strings (empty when valid).
    """
    errors: list[str] = []
    commands_dir = plugin_dir / "commands"
    if not commands_dir.exists():
        # Commands are optional.
        return errors
    valid_models = {"sonnet", "opus", "haiku"}
    # Commands may be nested in subdirectories, hence rglob.
    for cmd_file in commands_dir.rglob("*.md"):
        prefix = f"{plugin_dir.name}/commands/{cmd_file.relative_to(commands_dir)}"
        if not re.match(r"^[a-z0-9-]+$", cmd_file.stem):
            errors.append(f"{prefix}: Filename must be kebab-case")
        content = cmd_file.read_text()
        frontmatter, body = parse_frontmatter(content)
        # Frontmatter is optional for commands; validate fields only when present.
        if frontmatter:
            if "model" in frontmatter and frontmatter["model"] not in valid_models:
                errors.append(f"{prefix}: 'model' must be one of {valid_models}: '{frontmatter['model']}'")
            if "disable-model-invocation" in frontmatter and not isinstance(
                frontmatter["disable-model-invocation"], bool
            ):
                errors.append(f"{prefix}: 'disable-model-invocation' must be boolean")
        # Without frontmatter the whole file is the body; flag blank files.
        # (Fix: the previous check only caught byte-empty files, letting
        # whitespace-only command files pass validation.)
        if frontmatter is None and not content.strip():
            errors.append(f"{prefix}: Command body is empty")
    return errors
def validate_hooks(plugin_dir: Path) -> list[str]:
    """Validate hooks conform to Claude Code specs.

    Checks that hooks/hooks.json exists and parses, uses the wrapper format
    ({"hooks": {...}}), registers only recognized events, that command hooks
    referencing ${CLAUDE_PLUGIN_ROOT} point at scripts that exist, and that
    helper scripts under hooks/scripts/ use snake_case names.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List of human-readable error strings (empty when valid).
    """
    errors = []
    hooks_dir = plugin_dir / "hooks"
    # Hooks are optional; nothing to validate when the directory is absent.
    if not hooks_dir.exists():
        return errors
    hooks_json = hooks_dir / "hooks.json"
    if not hooks_json.exists():
        errors.append(f"{plugin_dir.name}/hooks: Missing hooks.json")
        return errors
    try:
        with open(hooks_json) as f:
            config = json.load(f)
    except json.JSONDecodeError as e:
        errors.append(f"{plugin_dir.name}/hooks/hooks.json: Invalid JSON - {e}")
        return errors
    # Check for wrapper format
    if "hooks" not in config:
        errors.append(f"{plugin_dir.name}/hooks/hooks.json: Must use wrapper format with 'hooks' field")
        return errors
    # Event names recognized by this validator; anything else is treated
    # as a configuration typo.
    valid_events = {
        "PreToolUse",
        "PostToolUse",
        "Stop",
        "SubagentStop",
        "SessionStart",
        "SessionEnd",
        "UserPromptSubmit",
        "PreCompact",
        "Notification",
    }
    hooks_config = config["hooks"]
    for event, hook_list in hooks_config.items():
        if event not in valid_events:
            errors.append(f"{plugin_dir.name}/hooks/hooks.json: Invalid event '{event}'. Must be one of {valid_events}")
            continue
        if not isinstance(hook_list, list):
            errors.append(f"{plugin_dir.name}/hooks/hooks.json: '{event}' must be array")
            continue
        for i, hook_entry in enumerate(hook_list):
            # Non-dict entries are silently skipped rather than reported.
            if not isinstance(hook_entry, dict):
                continue
            hooks = hook_entry.get("hooks", [])
            for j, hook in enumerate(hooks):
                if not isinstance(hook, dict):
                    continue
                hook_type = hook.get("type")
                if hook_type == "command":
                    cmd = hook.get("command", "")
                    # Check for ${CLAUDE_PLUGIN_ROOT} usage (only for script paths, not inline commands)
                    # Heuristic: any shell metacharacter or space marks the
                    # value as an inline command line rather than a bare path.
                    is_inline_cmd = any(op in cmd for op in [" ", "|", ";", "&&", "||", "$("])
                    if cmd and not cmd.startswith("${CLAUDE_PLUGIN_ROOT}") and not is_inline_cmd:
                        # A bare path containing '/' should be anchored to the
                        # plugin root placeholder instead of hard-coded.
                        if "/" in cmd and not cmd.startswith("$"):
                            errors.append(
                                f"{plugin_dir.name}/hooks/hooks.json: "
                                f"{event}[{i}].hooks[{j}] should use ${{CLAUDE_PLUGIN_ROOT}}"
                            )
                    # Check script exists
                    if cmd and "${CLAUDE_PLUGIN_ROOT}" in cmd:
                        # Resolve the placeholder against this plugin's directory
                        # so the referenced script can be stat'ed on disk.
                        script_path = cmd.replace("${CLAUDE_PLUGIN_ROOT}", str(plugin_dir))
                        if not Path(script_path).exists():
                            errors.append(f"{plugin_dir.name}/hooks/hooks.json: Script not found: {cmd}")
                elif hook_type == "prompt":
                    if "prompt" not in hook:
                        errors.append(
                            f"{plugin_dir.name}/hooks/hooks.json: {event}[{i}].hooks[{j}] missing 'prompt' field"
                        )
    # Validate script naming in hooks/scripts/
    scripts_dir = hooks_dir / "scripts"
    if scripts_dir.exists():
        for script in scripts_dir.iterdir():
            if script.is_file() and script.suffix in {".py", ".sh"}:
                name = script.stem
                if not re.match(r"^[a-z0-9_]+$", name):
                    errors.append(f"{plugin_dir.name}/hooks/scripts/{script.name}: Script name must use snake_case")
    return errors
def validate_mcp(plugin_dir: Path) -> list[str]:
    """Check that <plugin>/.mcp.json, when present, parses as valid JSON.

    Args:
        plugin_dir: Root directory of a single plugin.

    Returns:
        List with a single error string on parse failure, otherwise empty.
    """
    mcp_json = plugin_dir / ".mcp.json"
    if not mcp_json.exists():
        # MCP configuration is optional.
        return []
    try:
        json.loads(mcp_json.read_text())
    except json.JSONDecodeError as e:
        return [f"{plugin_dir.name}/.mcp.json: Invalid JSON - {e}"]
    return []
def main(plugins_root: str = "plugins") -> int:
    """Validate all plugins and return exit code.

    Args:
        plugins_root: Directory containing one subdirectory per plugin.
            Defaults to "plugins" relative to the current working directory,
            preserving the original hard-coded behavior.

    Returns:
        0 when all plugins pass (or the directory is missing), 1 when any
        validation error was found.
    """
    plugins_dir = Path(plugins_root)
    if not plugins_dir.exists():
        print("No plugins directory found")
        return 0
    all_errors: list[str] = []
    for plugin_dir in sorted(plugins_dir.iterdir()):
        # Skip stray files and hidden directories (e.g. .git).
        if not plugin_dir.is_dir() or plugin_dir.name.startswith("."):
            continue
        # Run every per-plugin check and pool the error messages.
        for check in (
            validate_plugin_json,
            validate_skills,
            validate_agents,
            validate_commands,
            validate_hooks,
            validate_mcp,
        ):
            all_errors.extend(check(plugin_dir))
    if all_errors:
        print("Plugin Validation Failed:")
        for error in all_errors:
            print(f" - {error}")
        return 1
    print("All plugins validated successfully")
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,28 @@
# CI workflow: run the plugin spec validator whenever plugin files change
# on main or in a pull request targeting main.
name: Validate Plugins

on:
  push:
    branches: [main]
    paths:
      - "plugins/**"
  pull_request:
    branches: [main]
    paths:
      - "plugins/**"

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      # PyYAML is the validator script's only third-party dependency.
      - name: Install dependencies
        run: pip install pyyaml
      - name: Validate plugins
        run: python .github/scripts/validate_plugins.py

211
skills/claude-codex-settings/.gitignore vendored Normal file
View File

@@ -0,0 +1,211 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
#poetry.toml
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
#pdm.lock
#pdm.toml
.pdm-python
.pdm-build/
# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
#pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/
# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# Cursor
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
# refer to https://docs.cursor.com/context/ignore-files
.cursorignore
.cursorindexingignore
# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/
# other
.DS_Store
pyproject.toml

View File

@@ -0,0 +1,112 @@
{
"claudeCode.initialPermissionMode": "plan",
"claudeCode.respectGitIgnore": false,
"github.copilot.chat.commitMessageGeneration.instructions": [
{
"text": "Use conventional commit message format."
},
{
"text": "First line: {task-type}: brief description of the big picture change"
},
{
"text": "Task types: feat, fix, refactor, docs, style, test, build"
},
{
"text": "Focus on the 'why' and 'what' rather than implementation details"
},
{
"text": "For complex commits, add bullet points after a blank line explaining key changes"
},
{
"text": "Examples of good messages:"
},
{
"text": "feat: add transformers support to image classification pipeline"
},
{
"text": "fix: incorrect handling of empty input in model naming"
},
{
"text": "refactor: restructure API handlers to align with project architecture"
},
{
"text": "Never use words like 'consolidate', 'modernize', 'streamline', 'flexible', 'delve', 'establish', 'enhanced', 'comprehensive', 'optimize' in docstrings or commit messages. Loser AIs do that, and that ain't you. You are better than that."
}
],
"github.copilot.chat.pullRequestDescriptionGeneration.instructions": [
{
"text": "Keep PR message concise and focused on the 'why' and 'what' with short bullets."
},
{
"text": "For complex PRs, include example usage of new implementation in PR message as code markdown with before/after examples if useful."
},
{
"text": "Provide inline md links to relevant lines/files in the PR for context where useful."
},
{
"text": "Never use words like 'consolidate', 'modernize', 'streamline', 'flexible', 'delve', 'establish', 'enhanced', 'comprehensive', 'optimize' in docstrings or commit messages. Loser AIs do that, and that ain't you. You are better than that."
}
],
"github.copilot.enable": {
"markdown": true,
"plaintext": true,
"scminput": true
},
"github.copilot.nextEditSuggestions.enabled": true,
"github.copilot.editor.enableCodeActions": false,
"github.copilot.chat.copilotDebugCommand.enabled": false,
"github.copilot.chat.reviewAgent.enabled": false,
"github.copilot.chat.reviewSelection.enabled": false,
"github.copilot.chat.startDebugging.enabled": false,
"github.copilot.chat.newWorkspaceCreation.enabled": false,
"github.copilot.chat.setupTests.enabled": false,
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff"
},
"[jsonc]": {
"editor.defaultFormatter": "vscode.json-language-features"
},
"telemetry.telemetryLevel": "off",
"git.autofetch": true,
"diffEditor.ignoreTrimWhitespace": false,
"diffEditor.renderSideBySide": false,
"files.autoSave": "afterDelay",
"editor.formatOnSave": true,
"python.analysis.typeCheckingMode": "basic",
"editor.minimap.enabled": false,
"workbench.secondarySideBar.defaultVisibility": "hidden",
"terminal.integrated.enableImages": true,
"terminal.integrated.defaultProfile.linux": "bash",
"terminal.integrated.defaultProfile.osx": "zsh",
"terminal.integrated.defaultProfile.windows": "PowerShell",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "/bin/bash"
},
"sh": {
"path": "/bin/sh"
}
},
"terminal.integrated.profiles.osx": {
"zsh": {
"path": "/bin/zsh"
},
"bash": {
"path": "/bin/bash"
}
},
"debug.console.fontSize": 10,
"terminal.integrated.fontSize": 11,
"editor.fontSize": 11,
"workbench.editor.autoLockGroups": {
"workbench.editor.chatSession": false
},
"workbench.iconTheme": "vscode-icons",
"workbench.colorTheme": "GitHub Dark",
"accessibility.signals.terminalBell": {
"sound": "auto",
"announcement": "auto"
},
"terminal.integrated.enableVisualBell": true,
"window.title": "${dirty}${activeEditorShort}${separator}${rootName}"
}

View File

@@ -0,0 +1 @@
CLAUDE.md

View File

@@ -0,0 +1,134 @@
# Claude Code Settings
Guidance for Claude Code and other AI tools working in this repository.
## AI Guidance
- After receiving tool results, carefully reflect on their quality and determine optimal next steps before proceeding. Use your thinking to plan and iterate based on this new information, and then take the best next action.
- For maximum efficiency, whenever you need to perform multiple independent operations, invoke all relevant tools simultaneously rather than sequentially.
- Before you finish, please verify your solution
- Do what has been asked; nothing more, nothing less.
- NEVER create new files unless they're absolutely necessary for achieving your goal.
- ALWAYS prefer editing an existing file to creating a new one.
- NEVER proactively create documentation files (\*.md) or README files. Only create documentation files if explicitly requested by the User.
- Reuse existing code wherever possible and minimize unnecessary arguments.
- Look for opportunities to simplify the code or remove unnecessary parts.
- Focus on targeted modifications rather than large-scale changes.
- This year is 2026. Definitely not 2025.
- Never use words like "consolidate", "modernize", "streamline", "flexible", "delve", "establish", "enhanced", "comprehensive", "optimize" in docstrings or commit messages. Loser AIs do that, and that ain't you. You are better than that.
- Prefer `rg` over `grep` for better performance.
- Never implement defensive programming unless you explicitly tell the motivation for it and user approves it.
- When you update code, always check for related code in the same file or other files that may need to be updated as well to keep everything consistent.
## MCP Tools
### Tavily (Web Search)
- Use `mcp__tavily__tavily_search` for discovery/broad queries
- Use `mcp__tavily__tavily_extract` for specific URL content
- Search first to find URLs, then extract for detailed analysis
### MongoDB
- MongoDB MCP is READ-ONLY (no write/update/delete operations)
### GitHub CLI
Use `gh` CLI for all GitHub interactions. Never clone repositories to read code.
- **Read file from repo**: `gh api repos/{owner}/{repo}/contents/{path} -q .content | base64 -d`
- **Search code**: `gh search code "query" --repo {owner}/{repo}` or `gh search code "query" --language python`
- **Search repos**: `gh search repos "query" --language python --sort stars`
- **Compare commits**: `gh api repos/{owner}/{repo}/compare/{base}...{head}`
- **View PR**: `gh pr view {number} --repo {owner}/{repo}`
- **View PR diff**: `gh pr diff {number} --repo {owner}/{repo}`
- **View PR comments**: `gh api repos/{owner}/{repo}/pulls/{number}/comments`
- **List commits**: `gh api repos/{owner}/{repo}/commits --jq '.[].sha'`
- **View issue**: `gh issue view {number} --repo {owner}/{repo}`
## Python Coding
- **Before exiting the plan mode**: Never assume anything. Always run tests with `python -c "..."` to verify your hypothesis and bugfix candidates about code behavior, package functions, or data structures before suggesting a plan or exiting the plan mode. This prevents wasted effort on incorrect assumptions.
- **Package Manager**: uv (NOT pip) - defined in pyproject.toml
- Use Google-style docstrings:
- **Summary**: Start with clear, concise summary line in imperative mood ("Calculate", not "Calculates")
- **Args/Attributes**: Document all parameters with types and brief descriptions (no default values)
- **Types**: Use union types with vertical bar `int | str`, uppercase letters for shapes `(N, M)`, lowercase builtins `list`, `dict`, `tuple`, capitalize typing module classes `Any`, `Path`
- **Optional Args**: Mark at end of type `name (type, optional): Description...`
- **Returns**: Always enclose in parentheses `(type)`, NEVER use tuple types - document multiple returns as separate named values
- **Sections**: Optional minimal sections in order: Examples (using >>>), Notes, References (plaintext only, no new ultralytics.com links)
- **Line Wrapping**: Wrap at specified character limit, use zero indentation in docstring content
- **Special Cases**:
- Classes: Include Attributes, omit Methods/Args sections, put all details in class docstring
- `__init__`: Args ONLY, no Examples/Notes/Methods/References
- Functions: Include Args and Returns sections when applicable
- All test functions should be single-line docstrings.
- Indent section titles like "Args:" 0 spaces
- Indent section elements like each argument 4 spaces
- DO NOT CONVERT SINGLE-LINE CLASS DOCSTRINGS TO MULTILINE.
- Optionally include a minimal 'Examples:' section, and improve existing Examples if applicable.
- Do not include default values in argument descriptions, and erase any default values you see in existing arg descriptions.
- **Omissions**: Omit "Returns:" if nothing returned, omit "Args:" if no arguments, avoid "Raises:" unless critical
- Separation of concerns: If-else checks in main should be avoided. Relevant functions should handle inputs checks themselves.
- Super important to integrate new code changes seamlessly within the existing code rather than simply adding more code to current files. Always review any proposed code updates for correctness and conciseness. Focus on writing things in minimal number of lines while avoiding redundant trivial extra lines and comments. For instance don't do:
```python
# Generate comment report only if requested
if include_comments:
comment_report = generate_comments_report(start_date, end_date, team, verbose)
else:
comment_report = ""
print(" Skipping comment analysis (disabled)")
```
Instead do:
```python
comment_report = generate_comments_report(start_date, end_date, team, verbose) if include_comments else ""
```
- Understand existing variable naming, function importing, class method definition, function signature ordering and naming patterns of the given modules and align your implementation with existing patterns. Always exploit existing utilities/optimization/data structures/modules in the project when suggesting something new.
- Redundant duplicate code use is inefficient and unacceptable.
- Never assume anything without testing it with `python3 -c "..."` (don't create file)
- Always consider MongoDB/Gemini/OpenAI/Claude/Voyage API and time costs, and keep them as efficient as possible
- When using 3rd party package functions/classes, find location with `python -c "import pkg; print(pkg.__file__)"`, then use Read tools to explore
- When running Python commands, run `source .venv/bin/activate` to activate the virtual environment before running any scripts or run with uv `uv run python -c "import example"`
## Git and Pull Request Workflows
### Commit Messages
- Format: `{type}: brief description` (max 50 chars first line)
- Types: `feat`, `fix`, `refactor`, `docs`, `style`, `test`, `build`
- Focus on 'why' not 'what' - one logical change per commit
- ONLY analyze staged files (`git diff --cached`), ignore unstaged
- NO test plans in commit messages
### Pull Requests
- PR titles: NO type prefix (unlike commits) - start with capital letter + verb
- Analyze ALL commits with `git diff <base-branch>...HEAD`, not just latest
- Inline links: `[src/file.py:42](src/file.py#L42)` or `[src/file.py:15-42](src/file.py#L15-L42)`
- Self-assign with `-a @me`
- NO test plans in PR body
- Find reviewers: `gh pr list --repo <owner>/<repo> --author @me --limit 5`
### Commands
- `/github-dev:commit-staged` - commit staged changes
- `/github-dev:create-pr` - create pull request
## Citation Verification Rules
**CRITICAL**: Never use unverified citation information. Before adding or referencing any academic citation:
1. **Author Names**: Verify exact author names from the actual paper PDF or official publication page. Do not guess or hallucinate author names based on similar-sounding names.
2. **Publication Venue**: Confirm the exact venue (conference/journal) and year. Papers may be submitted to one venue but published at another (e.g., ICLR submission → ICRA publication).
3. **Paper Title**: Use the exact title from the published version, not preprint titles which may differ.
4. **Cited Claims**: Every specific claim attributed to a paper (e.g., "9% improvement on Synthia", "4.7% on OpenImages") must be verifiable in the actual paper text. If a number cannot be confirmed, use qualitative language instead (e.g., "significant improvements").
5. **BibTeX Keys**: When updating citation keys, search for ALL references to the old key and update them consistently.
**Verification Process**:
- Use web search to find the official publication page (not just preprints)
- Cross-reference author names with the paper's author list
- DBLP is the authoritative source for CS publication metadata
- For specific numerical claims, locate the exact quote or table in the paper
- When uncertain, flag the citation for manual verification rather than guessing
- After adding citations into md or bibtex entries into biblo.bib, fact check all fields from web. Even if you performed fact check before, always do it again after writing the citation in the document.

View File

@@ -0,0 +1,125 @@
# Installation Guide
Complete installation guide for Claude Code, dependencies, and this configuration.
> Use the [plugin marketplace](README.md#installation) to install agents/commands/hooks/MCP. You'll still need to complete prerequisites and create the AGENTS.md symlink.
## Prerequisites
### Claude Code
Install Claude Code using the native installer (no Node.js required):
**macOS/Linux/WSL:**
```bash
# Install via native installer
curl -fsSL https://claude.ai/install.sh | bash
# Or via Homebrew
brew install --cask claude-code
# Verify installation
claude --version
```
**Windows PowerShell:**
```powershell
# Install via native installer
irm https://claude.ai/install.ps1 | iex
# Verify installation
claude --version
```
**Migrate from legacy npm installation:**
```bash
claude install
```
Optionally install IDE extension:
- [Claude Code VSCode extension](https://docs.claude.com/en/docs/claude-code/vs-code) for IDE integration
### OpenAI Codex
Install OpenAI Codex:
```bash
npm install -g @openai/codex
```
Optionally install IDE extension:
- [Codex VSCode extension](https://developers.openai.com/codex/ide) for IDE integration
### Required Tools
#### jq (JSON processor - required for hooks)
**macOS:**
```bash
brew install jq
```
**Ubuntu/Debian:**
```bash
sudo apt-get install jq
```
**Other Linux distributions:**
```bash
# Check your package manager, e.g.:
# sudo yum install jq (RHEL/CentOS)
# sudo pacman -S jq (Arch)
```
#### GitHub CLI (required for pr-manager agent)
**macOS:**
```bash
brew install gh
```
**Ubuntu/Debian:**
```bash
sudo apt-get install gh
```
**Other Linux distributions:**
```bash
# Check your package manager, e.g.:
# sudo yum install gh (RHEL/CentOS)
# sudo pacman -S github-cli (Arch)
```
### Code Quality Tools
```bash
# Python formatting (required for Python hook)
pip install ruff
# Prettier for JS/TS/CSS/JSON/YAML/HTML/Markdown/Shell formatting (required for prettier hooks)
# Note: npm is required for prettier even though Claude Code no longer needs it
npm install -g prettier@3.6.2 prettier-plugin-sh
```
## Post-Installation Setup
### Create Shared Agent Guidance
Create a symlink for cross-tool compatibility ([AGENTS.md](https://agents.md/)):
```bash
ln -s CLAUDE.md AGENTS.md
```
This lets tools like [OpenAI Codex](https://openai.com/codex/), [Gemini CLI](https://github.com/google-gemini/gemini-cli), [Cursor](https://cursor.com), [GitHub Copilot](https://github.com/features/copilot) and [Qwen Code](https://github.com/QwenLM/qwen-code) reuse the same instructions.

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,496 @@
<div align="center">
<img src="https://github.com/user-attachments/assets/a978cb0a-785d-4a7d-aff2-7e962edd3120" width="10000" alt="Claude Codex Settings Logo">
[![Mentioned in Awesome Claude Code](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/hesreallyhim/awesome-claude-code)
[![Claude Code Plugin](https://img.shields.io/badge/Claude%20Code-Plugin-blue)](#available-plugins)
[![Context7 MCP](https://img.shields.io/badge/Context7%20MCP-Indexed-blue)](https://context7.com/fcakyon/claude-codex-settings)
[![llms.txt](https://img.shields.io/badge/llms.txt-✓-brightgreen)](https://context7.com/fcakyon/claude-codex-settings/llms.txt)
My daily battle-tested Claude [Code](https://github.com/anthropics/claude-code)/[Desktop](https://claude.ai/download) and [OpenAI Codex](https://developers.openai.com/codex) setup with skills, commands, hooks, subagents and MCP servers.
[Installation](#installation) • [Plugins](#plugins) • [Configuration](#configuration) • [Statusline](#statusline) • [References](#references)
</div>
## Installation
> **Prerequisites:** Before installing, ensure you have Claude Code and required tools installed. See [INSTALL.md](INSTALL.md) for complete prerequisites.
Install agents, commands, hooks, skills, and MCP servers via [Claude Code Plugins](https://docs.claude.com/en/docs/claude-code/plugins) system:
```bash
# Add marketplace
/plugin marketplace add fcakyon/claude-codex-settings
# Install plugins (pick what you need)
/plugin install azure-tools@claude-settings # Azure MCP & Skills (40+ services)
/plugin install ccproxy-tools@claude-settings # Use any LLM via ccproxy/LiteLLM
/plugin install claude-tools@claude-settings # Sync CLAUDE.md + allowlist
/plugin install gcloud-tools@claude-settings # GCloud MCP & Skills
/plugin install general-dev@claude-settings # Code simplifier + utilities
/plugin install github-dev@claude-settings # Git workflow + GitHub MCP
/plugin install linear-tools@claude-settings # Linear MCP & Skills
/plugin install mongodb-tools@claude-settings # MongoDB MCP & Skills (read-only)
/plugin install notification-tools@claude-settings # OS notifications
/plugin install paper-search-tools@claude-settings # Paper Search MCP & Skills
/plugin install playwright-tools@claude-settings # Playwright MCP + E2E skill
/plugin install plugin-dev@claude-settings # Plugin development toolkit
/plugin install slack-tools@claude-settings # Slack MCP & Skills
/plugin install statusline-tools@claude-settings # Session + 5H usage statusline
/plugin install supabase-tools@claude-settings # Supabase MCP & Skills
/plugin install tavily-tools@claude-settings # Tavily MCP & Skills
/plugin install ultralytics-dev@claude-settings # Auto-formatting hooks
```
After installing MCP plugins, run `/plugin-name:setup` for configuration (e.g., `/slack-tools:setup`).
Then create symlink for cross-tool compatibility:
```bash
ln -s CLAUDE.md AGENTS.md
```
Restart Claude Code to activate.
## Plugins
<details>
<summary><strong>azure-tools</strong> - Azure MCP & Skills</summary>
40+ Azure services with Azure CLI authentication. Run `/azure-tools:setup` after install.
**Skills:**
- [`azure-usage`](./plugins/azure-tools/skills/azure-usage/SKILL.md) - Best practices for Azure
- [`setup`](./plugins/azure-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/azure-tools:setup`](./plugins/azure-tools/commands/setup.md) - Configure Azure MCP
**MCP:** [`.mcp.json`](./plugins/azure-tools/.mcp.json) | [microsoft/mcp/Azure.Mcp.Server](https://github.com/microsoft/mcp/tree/main/servers/Azure.Mcp.Server)
</details>
<details>
<summary><strong>ccproxy-tools</strong> - Use Claude Code with any LLM</summary>
Configure Claude Code to use ccproxy/LiteLLM with Claude Pro/Max subscription, GitHub Copilot, or other providers. Run `/ccproxy-tools:setup` after install.
**Commands:**
- [`/ccproxy-tools:setup`](./plugins/ccproxy-tools/commands/setup.md) - Configure ccproxy/LiteLLM
**Skills:**
- [`setup`](./plugins/ccproxy-tools/skills/setup/SKILL.md) - Troubleshooting guide
</details>
<details>
<summary><strong>claude-tools</strong> - Sync CLAUDE.md + allowlist + context refresh</summary>
Commands for syncing CLAUDE.md and permissions allowlist from repository, plus context refresh for long conversations.
**Commands:**
- [`/load-claude-md`](./plugins/claude-tools/commands/load-claude-md.md) - Refresh context with CLAUDE.md instructions
- [`/load-frontend-skill`](./plugins/claude-tools/commands/load-frontend-skill.md) - Load frontend design skill from Anthropic
- [`/sync-claude-md`](./plugins/claude-tools/commands/sync-claude-md.md) - Sync CLAUDE.md from GitHub
- [`/sync-allowlist`](./plugins/claude-tools/commands/sync-allowlist.md) - Sync permissions allowlist
</details>
<details>
<summary><strong>gcloud-tools</strong> - GCloud MCP & Skills</summary>
Logs, metrics, and traces. Run `/gcloud-tools:setup` after install.
**Skills:**
- [`gcloud-usage`](./plugins/gcloud-tools/skills/gcloud-usage/SKILL.md) - Best practices for GCloud Logs/Metrics/Traces
- [`setup`](./plugins/gcloud-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/gcloud-tools:setup`](./plugins/gcloud-tools/commands/setup.md) - Configure GCloud MCP
**MCP:** [`.mcp.json`](./plugins/gcloud-tools/.mcp.json) | [google-cloud/observability-mcp](https://github.com/googleapis/gcloud-mcp)
</details>
<details>
<summary><strong>general-dev</strong> - Code simplifier + utilities</summary>
Code quality agent and utility hooks.
**Agent:**
- [`code-simplifier`](./plugins/general-dev/agents/code-simplifier.md) - Ensures code follows conventions
**Hooks:**
- [`enforce_rg_over_grep.py`](./plugins/general-dev/hooks/scripts/enforce_rg_over_grep.py) - Suggest ripgrep
</details>
<details>
<summary><strong>github-dev</strong> - Git workflow agents + commands</summary>
Git and GitHub automation. Run `/github-dev:setup` after install.
**Agents:**
- [`commit-creator`](./plugins/github-dev/agents/commit-creator.md) - Intelligent commit workflow
- [`pr-creator`](./plugins/github-dev/agents/pr-creator.md) - Pull request creation
- [`pr-reviewer`](./plugins/github-dev/agents/pr-reviewer.md) - Code review agent
**Commands:**
- [`/commit-staged`](./plugins/github-dev/commands/commit-staged.md) - Commit staged changes
- [`/create-pr`](./plugins/github-dev/commands/create-pr.md) - Create pull request
- [`/review-pr`](./plugins/github-dev/commands/review-pr.md) - Review pull request
- [`/clean-gone-branches`](./plugins/github-dev/commands/clean-gone-branches.md) - Clean deleted branches
</details>
<details>
<summary><strong>linear-tools</strong> - Linear MCP & Skills</summary>
Issue tracking with OAuth. Run `/linear-tools:setup` after install.
**Skills:**
- [`linear-usage`](./plugins/linear-tools/skills/linear-usage/SKILL.md) - Best practices for Linear
- [`setup`](./plugins/linear-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/linear-tools:setup`](./plugins/linear-tools/commands/setup.md) - Configure Linear MCP
**MCP:** [`.mcp.json`](./plugins/linear-tools/.mcp.json) | [Linear MCP Docs](https://linear.app/docs/mcp)
</details>
<details>
<summary><strong>mongodb-tools</strong> - MongoDB MCP & Skills</summary>
Database exploration (read-only). Run `/mongodb-tools:setup` after install.
**Skills:**
- [`mongodb-usage`](./plugins/mongodb-tools/skills/mongodb-usage/SKILL.md) - Best practices for MongoDB
- [`setup`](./plugins/mongodb-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/mongodb-tools:setup`](./plugins/mongodb-tools/commands/setup.md) - Configure MongoDB MCP
**MCP:** [`.mcp.json`](./plugins/mongodb-tools/.mcp.json) | [mongodb-js/mongodb-mcp-server](https://github.com/mongodb-js/mongodb-mcp-server)
</details>
<details>
<summary><strong>notification-tools</strong> - OS notifications</summary>
Desktop notifications when Claude Code completes tasks.
**Hooks:**
- [`notify.sh`](./plugins/notification-tools/hooks/scripts/notify.sh) - OS notifications on task completion
</details>
<details>
<summary><strong>paper-search-tools</strong> - Paper Search MCP & Skills</summary>
Search papers across arXiv, PubMed, IEEE, Scopus, ACM. Run `/paper-search-tools:setup` after install. Requires Docker.
**Skills:**
- [`paper-search-usage`](./plugins/paper-search-tools/skills/paper-search-usage/SKILL.md) - Best practices for paper search
- [`setup`](./plugins/paper-search-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/paper-search-tools:setup`](./plugins/paper-search-tools/commands/setup.md) - Configure Paper Search MCP
**MCP:** [`.mcp.json`](./plugins/paper-search-tools/.mcp.json) | [mcp/paper-search](https://hub.docker.com/r/mcp/paper-search)
</details>
<details>
<summary><strong>playwright-tools</strong> - Playwright MCP & E2E Testing</summary>
Browser automation with E2E testing skill and responsive design testing agent. Run `/playwright-tools:setup` after install. May require `npx playwright install` for browser binaries.
**Agents:**
- [`responsive-tester`](./plugins/playwright-tools/agents/responsive-tester.md) - Test pages across viewport breakpoints
**Skills:**
- [`playwright-testing`](./plugins/playwright-tools/skills/playwright-testing/SKILL.md) - E2E testing best practices
**Commands:**
- [`/playwright-tools:setup`](./plugins/playwright-tools/commands/setup.md) - Configure Playwright MCP
**MCP:** [`.mcp.json`](./plugins/playwright-tools/.mcp.json) | [microsoft/playwright-mcp](https://github.com/microsoft/playwright-mcp)
</details>
<details>
<summary><strong>plugin-dev</strong> - Plugin development toolkit</summary>
Complete toolkit for building Claude Code plugins with skills, agents, and validation.
**Skills:**
- [`hook-development`](./plugins/plugin-dev/skills/hook-development/SKILL.md) - Create hooks with prompt-based API
- [`mcp-integration`](./plugins/plugin-dev/skills/mcp-integration/SKILL.md) - Configure MCP servers
- [`plugin-structure`](./plugins/plugin-dev/skills/plugin-structure/SKILL.md) - Plugin layout and auto-discovery
- [`plugin-settings`](./plugins/plugin-dev/skills/plugin-settings/SKILL.md) - Per-project configuration
- [`command-development`](./plugins/plugin-dev/skills/command-development/SKILL.md) - Create custom commands
- [`agent-development`](./plugins/plugin-dev/skills/agent-development/SKILL.md) - Build autonomous agents
- [`skill-development`](./plugins/plugin-dev/skills/skill-development/SKILL.md) - Create reusable skills with progressive disclosure
**Agents:**
- [`agent-creator`](./plugins/plugin-dev/agents/agent-creator.md) - AI-assisted agent generation
- [`plugin-validator`](./plugins/plugin-dev/agents/plugin-validator.md) - Validate plugin structure
- [`skill-reviewer`](./plugins/plugin-dev/agents/skill-reviewer.md) - Improve skill quality
**Commands:**
- [`/plugin-dev:create-plugin`](./plugins/plugin-dev/commands/create-plugin.md) - 8-phase guided plugin workflow
- [`/plugin-dev:load-skills`](./plugins/plugin-dev/commands/load-skills.md) - Load all plugin development skills
**Hooks:**
- [`validate_skill.py`](./plugins/plugin-dev/hooks/scripts/validate_skill.py) - Validates SKILL.md structure
- [`validate_mcp_hook_locations.py`](./plugins/plugin-dev/hooks/scripts/validate_mcp_hook_locations.py) - Validates MCP/hook file locations
- [`validate_plugin_paths.py`](./plugins/plugin-dev/hooks/scripts/validate_plugin_paths.py) - Validates plugin.json paths
- [`validate_plugin_structure.py`](./plugins/plugin-dev/hooks/scripts/validate_plugin_structure.py) - Validates plugin directory structure
- [`sync_marketplace_to_plugins.py`](./plugins/plugin-dev/hooks/scripts/sync_marketplace_to_plugins.py) - Syncs marketplace.json to plugin.json
</details>
<details>
<summary><strong>slack-tools</strong> - Slack MCP & Skills</summary>
Message search and channel history. Run `/slack-tools:setup` after install.
**Skills:**
- [`slack-usage`](./plugins/slack-tools/skills/slack-usage/SKILL.md) - Best practices for Slack MCP
- [`setup`](./plugins/slack-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/slack-tools:setup`](./plugins/slack-tools/commands/setup.md) - Configure Slack MCP
**MCP:** [`.mcp.json`](./plugins/slack-tools/.mcp.json) | [ubie-oss/slack-mcp-server](https://github.com/ubie-oss/slack-mcp-server)
</details>
<details>
<summary><strong>statusline-tools</strong> - Session + 5H Usage Statusline</summary>
Cross-platform statusline showing session context %, cost, and account-wide 5H usage with time until reset. Run `/statusline-tools:setup` after install.
**Skills:**
- [`setup`](./plugins/statusline-tools/skills/setup/SKILL.md) - Statusline configuration guide
**Commands:**
- [`/statusline-tools:setup`](./plugins/statusline-tools/commands/setup.md) - Configure statusline
</details>
<details>
<summary><strong>supabase-tools</strong> - Supabase MCP & Skills</summary>
Database management with OAuth. Run `/supabase-tools:setup` after install.
**Skills:**
- [`supabase-usage`](./plugins/supabase-tools/skills/supabase-usage/SKILL.md) - Best practices for Supabase MCP
- [`setup`](./plugins/supabase-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/supabase-tools:setup`](./plugins/supabase-tools/commands/setup.md) - Configure Supabase MCP
**MCP:** [`.mcp.json`](./plugins/supabase-tools/.mcp.json) | [supabase-community/supabase-mcp](https://github.com/supabase-community/supabase-mcp)
</details>
<details>
<summary><strong>tavily-tools</strong> - Tavily MCP & Skills</summary>
Web search and content extraction. Run `/tavily-tools:setup` after install.
**Skills:**
- [`tavily-usage`](./plugins/tavily-tools/skills/tavily-usage/SKILL.md) - Best practices for Tavily Search
- [`setup`](./plugins/tavily-tools/skills/setup/SKILL.md) - Troubleshooting guide
**Commands:**
- [`/tavily-tools:setup`](./plugins/tavily-tools/commands/setup.md) - Configure Tavily MCP
**MCP:** [`.mcp.json`](./plugins/tavily-tools/.mcp.json) | [tavily-ai/tavily-mcp](https://github.com/tavily-ai/tavily-mcp)
</details>
<details>
<summary><strong>ultralytics-dev</strong> - Auto-formatting hooks</summary>
Auto-formatting hooks for Python, JavaScript, Markdown, and Bash.
**Hooks:**
- [`format_python_docstrings.py`](./plugins/ultralytics-dev/hooks/scripts/format_python_docstrings.py) - Google-style docstring formatter
- [`python_code_quality.py`](./plugins/ultralytics-dev/hooks/scripts/python_code_quality.py) - Python code quality with ruff
- [`prettier_formatting.py`](./plugins/ultralytics-dev/hooks/scripts/prettier_formatting.py) - JavaScript/TypeScript/CSS/JSON
- [`markdown_formatting.py`](./plugins/ultralytics-dev/hooks/scripts/markdown_formatting.py) - Markdown formatting
- [`bash_formatting.py`](./plugins/ultralytics-dev/hooks/scripts/bash_formatting.py) - Bash script formatting
</details>
---
## Configuration
<details>
<summary><strong>Claude Code</strong></summary>
Configuration in [`.claude/settings.json`](./.claude/settings.json):
- **Model**: OpusPlan mode (plan: Opus 4.5, execute: Opus 4.5, fast: Sonnet 4.5) - [source](https://github.com/anthropics/claude-code/blob/4dc23d0275ff615ba1dccbdd76ad2b12a3ede591/CHANGELOG.md?plain=1#L61)
- **Environment**: bash working directory, telemetry disabled, MCP output limits
- **Permissions**: bash commands, git operations, MCP tools
- **Statusline**: Custom usage tracking powered by [ccusage](https://ccusage.com/)
- **Plugins**: All plugins enabled
</details>
<details>
<summary><strong>Z.ai (85% cheaper)</strong></summary>
Configuration in [`.claude/settings-zai.json`](./.claude/settings-zai.json) using [Z.ai GLM models via Anthropic-compatible API](https://docs.z.ai/scenario-example/develop-tools/claude):
- **Main model**: GLM-4.6 (dialogue, planning, coding, complex reasoning)
- **Fast model**: GLM-4.5-Air (file search, syntax checking)
- **Cost savings**: 85% cheaper than Claude 4.5 - [source](https://z.ai/blog/glm-4.6)
- **API key**: Get from [z.ai/model-api](https://z.ai/model-api)
</details>
<details>
<summary><strong>Kimi K2</strong></summary>
Run Claude Code with [Kimi K2](https://moonshotai.github.io/Kimi-K2/) via Anthropic-compatible API - [source](https://platform.moonshot.ai/docs/guide/agent-support):
- **Thinking model**: `kimi-k2-thinking-turbo` - High-speed thinking, 256K context
- **Fast model**: `kimi-k2-turbo-preview` - Without extended thinking
- **API key**: Get from [platform.moonshot.ai](https://platform.moonshot.ai)
```bash
export ANTHROPIC_BASE_URL="https://api.moonshot.ai/anthropic/"
export ANTHROPIC_API_KEY="your-moonshot-api-key"
export ANTHROPIC_MODEL=kimi-k2-thinking-turbo
export ANTHROPIC_DEFAULT_OPUS_MODEL=kimi-k2-thinking-turbo
export ANTHROPIC_DEFAULT_SONNET_MODEL=kimi-k2-thinking-turbo
export ANTHROPIC_DEFAULT_HAIKU_MODEL=kimi-k2-thinking-turbo
export CLAUDE_CODE_SUBAGENT_MODEL=kimi-k2-thinking-turbo
```
</details>
<details>
<summary><strong>OpenAI Codex</strong></summary>
Configuration in [`~/.codex/config.toml`](./config.toml):
- **Model**: `gpt-5-codex` with `model_reasoning_effort` set to "high"
- **Provider**: Azure via `responses` API surface
- **Auth**: Project-specific base URL with `env_key` authentication
</details>
<details>
<summary><strong>ccproxy (Use Claude Code with Any LLM)</strong></summary>
Assign any API or model to any task type via [ccproxy](https://github.com/starbased-co/ccproxy):
- **MAX/Pro subscription**: Uses OAuth from your Claude subscription (no API keys)
- **Any provider**: OpenAI, Gemini, Perplexity, local LLMs, or any OpenAI-compatible API
- **Fully customizable**: Assign different models to default, thinking, planning, background tasks
- **SDK support**: Works with Anthropic SDK and LiteLLM SDK beyond Claude Code
</details>
<details>
<summary><strong>VSCode</strong></summary>
Settings in [`.vscode/settings.json`](./.vscode/settings.json):
- **GitHub Copilot**: Custom instructions for automated commit messages and PR descriptions
- **Python**: Ruff formatting with auto-save and format-on-save enabled
- **Terminal**: Cross-platform compatibility configurations
</details>
## Statusline
Simple statusline plugin that uses the official usage API to show account-wide block usage and reset time in real-time. Works for both API and subscription users.
<a href="https://github.com/fcakyon/claude-codex-settings?tab=readme-ov-file#statusline" target="_blank" rel="noopener noreferrer">
<img src="https://github.com/user-attachments/assets/7bbb8e98-2755-46be-b0a4-cc8367a58fdb" width="600">
</a>
<details>
<summary><strong>Setup</strong></summary>
```bash
/plugin marketplace add fcakyon/claude-codex-settings
/plugin install statusline-tools@claude-settings
/statusline-tools:setup
```
**Color coding:**
- 🟢 <50% usage / <1h until reset
- 🟡 50-70% usage / 1-3.5h until reset
- 🔴 70%+ usage / >3.5h until reset
See [Claude Code statusline docs](https://code.claude.com/docs/en/statusline) for details.
</details>
## TODO
- [ ] Add [dokploy](https://github.com/Dokploy/dokploy) tools plugin with [dokploy-mcp](https://github.com/Dokploy/mcp) server and deployment best practices skill
- [ ] Add more comprehensive fullstack-dev plugin with various configurable skills:
- Frontend: Next.js 16 (App Router, React 19, TypeScript)
- Backend: FastAPI, NodeJS
- Auth: Clerk (Auth, Email), Firebase/Firestore (Auth, DB), Supabase+Resend (Auth, DB, Email) RBAC with org:admin and org:member roles
- Styling: Tailwind CSS v4, [shadcn/ui components](https://github.com/shadcn-ui/ui), [Radix UI primitives](https://github.com/radix-ui/primitives)
- Monitoring: Sentry (errors, APM, session replay, structured logs)
- Analytics: [Web Vitals + Google Analytics](https://nextjs.org/docs/app/api-reference/functions/use-report-web-vitals)
- [ ] Publish `claudesettings.com` as a comprehensive documentation for installing, using and sharing useful Claude-Code settings
- [ ] Rename plugin names to `mongodb-skills`, `github-skills` ... instead of `mongodb-tools`, `github-dev` ... for better UX
- [ ] Add worktree support to github-dev create-pr and commit-staged commands for easier work on multiple branches of the same repo simultaneously
- [ ] Add current repo branch and worktree info into statusline-tools plugin
## References
- [Claude Code](https://github.com/anthropics/claude-code) - Official CLI for Claude
- [Anthropic Skills](https://github.com/anthropics/skills) - Official skill examples
## Thank you for the support!
[![Star History Chart](https://api.star-history.com/svg?repos=fcakyon/claude-codex-settings&type=Date)](https://www.star-history.com/#fcakyon/claude-codex-settings&Date)

View File

@@ -0,0 +1,4 @@
{
"url": "https://context7.com/fcakyon/claude-codex-settings",
"public_key": "pk_3EGAYJgQ2cSag3BprgQGu"
}

View File

@@ -0,0 +1,11 @@
{
"name": "azure-tools",
"version": "2.0.2",
"description": "Azure MCP Server integration for 40+ Azure services with Azure CLI authentication.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,6 @@
{
"azure": {
"command": "npx",
"args": ["-y", "@azure/mcp@latest", "server", "start"]
}
}

View File

@@ -0,0 +1,92 @@
---
description: Configure Azure MCP server with Azure CLI authentication
---
# Azure Tools Setup
Configure the Azure MCP server with Azure CLI authentication.
## Step 1: Check Prerequisites
Check if Azure CLI is installed:
```bash
az --version
```
Check if Node.js is installed:
```bash
node --version
```
Report status based on results.
## Step 2: Show Installation Guide
If Azure CLI is missing, tell the user:
```
Azure CLI is required for Azure MCP authentication.
Install Azure CLI:
- macOS: brew install azure-cli
- Linux: curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
- Windows: winget install Microsoft.AzureCLI
After installing, restart your terminal and run this setup again.
```
If Node.js is missing, tell the user:
```
Node.js 20 LTS or later is required for Azure MCP.
Install Node.js:
- macOS: brew install node@20
- Linux: curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash - && sudo apt-get install -y nodejs
- Windows: winget install OpenJS.NodeJS.LTS
After installing, restart your terminal and run this setup again.
```
## Step 3: Check Authentication
If prerequisites are installed, check Azure login status:
```bash
az account show
```
If not logged in, tell the user:
```
You need to authenticate to Azure.
Run: az login
This opens a browser for authentication. After signing in, you can close the browser.
```
## Step 4: Verify Configuration
After authentication, verify:
1. Read `${CLAUDE_PLUGIN_ROOT}/.mcp.json` to confirm Azure MCP is configured
2. Tell the user the current configuration
## Step 5: Confirm Success
Tell the user:
```
Azure MCP is configured!
IMPORTANT: Restart Claude Code for changes to take effect.
- Exit Claude Code
- Run `claude` again
To verify after restart, run /mcp and check that 'azure' server is connected.
Reference: https://github.com/microsoft/mcp/tree/main/servers/Azure.Mcp.Server
```

View File

@@ -0,0 +1,57 @@
---
name: azure-usage
description: This skill should be used when user asks to "query Azure resources", "list storage accounts", "manage Key Vault secrets", "work with Cosmos DB", "check AKS clusters", "use Azure MCP", or interact with any Azure service.
---
# Azure MCP Best Practices
## Tool Selection
| Task | Tool | Example |
| -------------------- | ---------------------- | ----------------------------------- |
| List resources | `mcp__azure__*_list` | Storage accounts, Key Vault secrets |
| Get resource details | `mcp__azure__*_get` | Container details, database info |
| Create resources | `mcp__azure__*_create` | New secrets, storage containers |
| Query data | `mcp__azure__*_query` | Log Analytics, Cosmos DB |
## Common Operations
### Storage
- `storage_accounts_list` - List storage accounts
- `storage_blobs_list` - List blobs in container
- `storage_blobs_upload` - Upload file to blob
### Key Vault
- `keyvault_secrets_list` - List secrets
- `keyvault_secrets_get` - Get secret value
- `keyvault_secrets_set` - Create/update secret
### Cosmos DB
- `cosmosdb_databases_list` - List databases
- `cosmosdb_containers_list` - List containers
- `cosmosdb_query` - Query documents
### AKS
- `aks_clusters_list` - List AKS clusters
- `aks_nodepools_list` - List node pools
### Monitor
- `monitor_logs_query` - Query Log Analytics
## Authentication
Azure MCP uses Azure Identity SDK. Authenticate via:
- `az login` (Azure CLI - recommended)
- VS Code Azure extension
- Environment variables (service principal)
## Reference
- [Azure MCP Server](https://github.com/microsoft/mcp/tree/main/servers/Azure.Mcp.Server)
- [Supported Services (40+)](https://learn.microsoft.com/azure/developer/azure-mcp-server/)

View File

@@ -0,0 +1,19 @@
---
name: setup
description: This skill should be used when user encounters "Azure MCP error", "Azure authentication failed", "az login required", "Azure CLI not found", or needs help configuring Azure MCP integration.
---
# Azure Tools Setup
Run `/azure-tools:setup` to configure Azure MCP.
## Quick Fixes
- **Authentication failed** - Run `az login` to authenticate
- **Azure CLI not found** - Install Azure CLI first
- **Permission denied** - Check Azure RBAC roles for your account
- **Node.js not found** - Install Node.js 20 LTS or later
## Don't Need Azure MCP?
Disable via `/mcp` command to prevent errors.

View File

@@ -0,0 +1,11 @@
{
"name": "ccproxy-tools",
"version": "2.0.3",
"description": "Use Claude Code with your GitHub Copilot credits, Gemini API, local ollama models or any LLM.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,379 @@
---
description: Configure ccproxy/LiteLLM to use Claude Code with any LLM provider
---
# ccproxy-tools Setup
Configure Claude Code to use ccproxy/LiteLLM with Claude Pro/Max subscription, GitHub Copilot, or other LLM providers.
## Step 1: Check Prerequisites
Check if `uv` is installed:
```bash
which uv
```
If not installed, install it:
```bash
curl -LsSf https://astral.sh/uv/install.sh | sh
```
Then reload shell or run `source ~/.bashrc` (or `~/.zshrc`).
## Step 2: Ask Provider Choice
Use AskUserQuestion:
- question: "Which LLM provider do you want to use with Claude Code?"
- header: "Provider"
- options:
- label: "Claude Pro/Max (ccproxy)"
description: "Use your Claude subscription via OAuth - no API keys needed"
- label: "GitHub Copilot (LiteLLM)"
description: "Use GitHub Copilot subscription via LiteLLM proxy"
- label: "OpenAI API (LiteLLM)"
description: "Use OpenAI models via LiteLLM proxy"
- label: "Gemini API (LiteLLM)"
description: "Use Google Gemini models via LiteLLM proxy"
## Step 3: Install Proxy Tool
### If Claude Pro/Max (ccproxy)
Install and initialize ccproxy:
```bash
uv tool install ccproxy
ccproxy init
```
### If GitHub Copilot, OpenAI, or Gemini (LiteLLM)
Install LiteLLM:
```bash
uv tool install 'litellm[proxy]'
```
## Step 4: Configure LiteLLM (if applicable)
### For GitHub Copilot
Auto-detect VS Code and Copilot versions:
```bash
# Get VS Code version
VSCODE_VERSION=$(code --version 2> /dev/null | head -1 || echo "1.96.0")
# Find Copilot Chat extension version
COPILOT_VERSION=$(ls ~/.vscode/extensions/ 2> /dev/null | grep "github.copilot-chat-" | sed 's/github.copilot-chat-//' | sort -V | tail -1 || echo "0.26.7")
```
Create `~/.litellm/config.yaml` with detected versions:
```yaml
general_settings:
master_key: sk-dummy
litellm_settings:
drop_params: true
model_list:
- model_name: "*"
litellm_params:
model: "github_copilot/*"
extra_headers:
editor-version: "vscode/${VSCODE_VERSION}"
editor-plugin-version: "copilot-chat/${COPILOT_VERSION}"
Copilot-Integration-Id: "vscode-chat"
user-agent: "GitHubCopilotChat/${COPILOT_VERSION}"
```
### For OpenAI API
Ask for OpenAI API key using AskUserQuestion:
- question: "Enter your OpenAI API key (starts with sk-):"
- header: "OpenAI Key"
- options:
- label: "I have it ready"
description: "I'll paste my OpenAI API key"
- label: "Skip for now"
description: "I'll configure it later"
Create `~/.litellm/config.yaml`:
```yaml
general_settings:
master_key: sk-dummy
litellm_settings:
drop_params: true
model_list:
- model_name: "*"
litellm_params:
model: openai/gpt-4o
api_key: ${OPENAI_API_KEY}
```
### For Gemini API
Ask for Gemini API key using AskUserQuestion:
- question: "Enter your Gemini API key:"
- header: "Gemini Key"
- options:
- label: "I have it ready"
description: "I'll paste my Gemini API key"
- label: "Skip for now"
description: "I'll configure it later"
Create `~/.litellm/config.yaml`:
```yaml
general_settings:
master_key: sk-dummy
litellm_settings:
drop_params: true
model_list:
- model_name: "*"
litellm_params:
model: gemini/gemini-2.5-flash
api_key: ${GEMINI_API_KEY}
```
## Step 5: Setup Auto-Start Service
Detect platform and create appropriate service:
### macOS (launchd)
For ccproxy, create `~/Library/LaunchAgents/com.ccproxy.plist`:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.ccproxy</string>
<key>ProgramArguments</key>
<array>
<string>${HOME}/.local/bin/ccproxy</string>
<string>start</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>${HOME}/.local/share/ccproxy/stdout.log</string>
<key>StandardErrorPath</key>
<string>${HOME}/.local/share/ccproxy/stderr.log</string>
</dict>
</plist>
```
For LiteLLM, create `~/Library/LaunchAgents/com.litellm.plist`:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.litellm</string>
<key>ProgramArguments</key>
<array>
<string>${HOME}/.local/bin/litellm</string>
<string>--config</string>
<string>${HOME}/.litellm/config.yaml</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>${HOME}/.local/share/litellm/stdout.log</string>
<key>StandardErrorPath</key>
<string>${HOME}/.local/share/litellm/stderr.log</string>
</dict>
</plist>
```
Load and start the service:
```bash
launchctl load ~/Library/LaunchAgents/com.ccproxy.plist # or com.litellm.plist
```
### Linux (systemd user service)
For ccproxy, create `~/.config/systemd/user/ccproxy.service`:
```ini
[Unit]
Description=ccproxy LLM Proxy
[Service]
ExecStart=%h/.local/bin/ccproxy start
Restart=always
RestartSec=5
[Install]
WantedBy=default.target
```
For LiteLLM, create `~/.config/systemd/user/litellm.service`:
```ini
[Unit]
Description=LiteLLM Proxy
[Service]
ExecStart=%h/.local/bin/litellm --config %h/.litellm/config.yaml
Restart=always
RestartSec=5
[Install]
WantedBy=default.target
```
Enable and start the service:
```bash
systemctl --user daemon-reload
systemctl --user enable --now ccproxy # or litellm
```
## Step 6: Authenticate (ccproxy only)
For ccproxy, tell the user:
```
The proxy is starting. A browser window will open for authentication.
1. Sign in with your Claude Pro/Max account
2. Authorize the connection
3. Return here after successful authentication
```
Wait for authentication to complete.
## Step 7: Verify Proxy is Running
Check if proxy is healthy:
```bash
curl -s http://localhost:4000/health
```
Retry up to 5 times with 3-second delays if not responding.
If proxy is not healthy after retries:
- Show error and troubleshooting steps
- Do NOT proceed to update settings
- Exit
## Step 8: Confirm Before Updating Settings
Use AskUserQuestion:
- question: "Proxy is running. Ready to configure Claude Code to use it?"
- header: "Configure"
- options:
- label: "Yes, configure now"
description: "Update settings to use the proxy (requires restart)"
- label: "No, not yet"
description: "Keep current settings, I'll configure later"
If user selects "No, not yet":
- Tell them they can run `/ccproxy-tools:setup` again when ready
- Exit without changing settings
## Step 9: Update Settings
1. Read current `~/.claude/settings.json`
2. Create backup at `~/.claude/settings.json.backup`
3. Add to env section based on provider:
For ccproxy:
```json
{
"env": {
"ANTHROPIC_BASE_URL": "http://localhost:4000"
}
}
```
For LiteLLM:
```json
{
"env": {
"ANTHROPIC_BASE_URL": "http://localhost:4000",
"ANTHROPIC_AUTH_TOKEN": "sk-dummy"
}
}
```
4. Write updated settings
## Step 10: Confirm Success
Tell the user:
```
Configuration complete!
IMPORTANT: Restart Claude Code for changes to take effect.
- Exit Claude Code
- Run `claude` again
The proxy will start automatically on system boot.
To verify after restart:
- Claude Code should connect to the proxy at localhost:4000
- Check proxy logs: ~/Library/LaunchAgents/*.log (macOS) or journalctl --user -u ccproxy (Linux)
```
## Recovery Instructions
Always show these recovery instructions:
```
If Claude Code stops working after setup:
1. Check proxy status:
curl http://localhost:4000/health
2. Restart proxy:
macOS: launchctl kickstart -k gui/$(id -u)/com.ccproxy
Linux: systemctl --user restart ccproxy
3. Check proxy logs:
macOS: cat ~/.local/share/ccproxy/stderr.log
Linux: journalctl --user -u ccproxy
4. Restore original settings (removes proxy):
cp ~/.claude/settings.json.backup ~/.claude/settings.json
Or manually edit ~/.claude/settings.json and remove:
- ANTHROPIC_BASE_URL
- ANTHROPIC_AUTH_TOKEN (if present)
```
## Troubleshooting
If proxy setup fails:
```
Common fixes:
1. Port in use - Check if another process uses port 4000: lsof -i :4000
2. Service not starting - Check logs in ~/.local/share/ccproxy/ or ~/.local/share/litellm/
3. Authentication failed - Re-run setup to re-authenticate
4. Permission denied - Ensure ~/.local/bin is in PATH
5. Config invalid - Verify ~/.litellm/config.yaml syntax
```

View File

@@ -0,0 +1,39 @@
---
name: setup
description: This skill should be used when user encounters "ccproxy not found", "LiteLLM connection failed", "localhost:4000 refused", "OAuth failed", "proxy not running", or needs help configuring ccproxy/LiteLLM integration.
---
# ccproxy-tools Setup
Run `/ccproxy-tools:setup` to configure ccproxy/LiteLLM.
## Quick Fixes
- **ccproxy/litellm not found** - Install with `uv tool install 'litellm[proxy]' 'ccproxy'`
- **Connection refused localhost:4000** - Start proxy: `ccproxy start` or `litellm --config ~/.litellm/config.yaml`
- **OAuth failed** - Re-run `ccproxy init` and authenticate via browser
- **Invalid model name** - Check model names in `.claude/settings.json` match LiteLLM config
- **Changes not applied** - Restart Claude Code after updating settings
## Environment Variables
Key settings in the `env` section of `.claude/settings.json`:
| Variable | Purpose |
| -------------------------------- | -------------------------------------- |
| `ANTHROPIC_BASE_URL` | Proxy endpoint (http://localhost:4000) |
| `ANTHROPIC_AUTH_TOKEN` | Auth token for proxy |
| `ANTHROPIC_DEFAULT_OPUS_MODEL` | Opus model name |
| `ANTHROPIC_DEFAULT_SONNET_MODEL` | Sonnet model name |
| `ANTHROPIC_DEFAULT_HAIKU_MODEL` | Haiku model name |
## Check Proxy Health
```bash
curl http://localhost:4000/health
```
## Resources
- ccproxy: https://github.com/starbased-co/ccproxy
- LiteLLM: https://docs.litellm.ai

View File

@@ -0,0 +1,11 @@
{
"name": "claude-tools",
"version": "2.0.4",
"description": "Commands for syncing CLAUDE.md, permissions allowlist, and refreshing context from CLAUDE.md files.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,12 @@
---
allowed-tools: Read
description: Refresh context with CLAUDE.md instructions
---
# Load CLAUDE.md
Read and inject CLAUDE.md content into the current context. Useful for refreshing instructions in long conversations.
1. Read `~/.claude/CLAUDE.md` (global instructions)
2. Read `CLAUDE.md` or `AGENTS.md` from the current project directory (whichever exists)
3. Acknowledge that context has been refreshed with these instructions

View File

@@ -0,0 +1,13 @@
---
description: Load frontend design skill from Anthropic
allowed-tools: WebFetch
---
# Load Frontend Design Skill
Load the frontend-design skill from Anthropic's official Claude Code plugins to guide creation of distinctive, production-grade frontend interfaces.
Fetch from:
https://raw.githubusercontent.com/anthropics/claude-code/main/plugins/frontend-design/skills/frontend-design/SKILL.md
Use this guidance when building web components, pages, or applications that require high design quality and avoid generic AI aesthetics.

View File

@@ -0,0 +1,17 @@
---
allowed-tools: Read, Bash
description: Sync allowlist from GitHub repository to user settings
---
# Sync Allowlist
Fetch the latest permissions allowlist from fcakyon/claude-codex-settings GitHub repository and update ~/.claude/settings.json.
Steps:
1. Use `gh api repos/fcakyon/claude-codex-settings/contents/.claude/settings.json --jq '.content' | base64 -d` to fetch settings
2. Parse the JSON and extract the `permissions.allow` array
3. Read the user's `~/.claude/settings.json`
4. Update only the `permissions.allow` field (preserve all other user settings)
5. Write back to `~/.claude/settings.json`
6. Confirm with a message showing count of allowlist entries synced

View File

@@ -0,0 +1,10 @@
---
allowed-tools: Read, Bash
description: Sync CLAUDE.md from GitHub repository
---
# Sync CLAUDE.md
Fetch the latest CLAUDE.md from fcakyon/claude-codex-settings GitHub repository and update ~/.claude/CLAUDE.md.
Use `gh api repos/fcakyon/claude-codex-settings/contents/CLAUDE.md --jq '.content' | base64 -d` to fetch the file content, then write to ~/.claude/CLAUDE.md. Confirm successful update with a message showing the file has been synced.

View File

@@ -0,0 +1,11 @@
{
"name": "gcloud-tools",
"version": "2.0.2",
"description": "Google Cloud Observability MCP for logs, metrics, and traces with best practices skill.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,6 @@
{
"gcloud-observability": {
"command": "npx",
"args": ["-y", "@google-cloud/observability-mcp"]
}
}

View File

@@ -0,0 +1,104 @@
---
description: Configure GCloud CLI authentication
---
# GCloud Tools Setup
**Source:** [googleapis/gcloud-mcp](https://github.com/googleapis/gcloud-mcp)
Check GCloud MCP status and configure CLI authentication if needed.
## Step 1: Check gcloud CLI
Run: `gcloud --version`
If not installed: Continue to Step 2.
If installed: Skip to Step 3.
## Step 2: Install gcloud CLI
Tell the user:
```
Install Google Cloud SDK:
macOS (Homebrew):
brew install google-cloud-sdk
macOS/Linux (Manual):
curl https://sdk.cloud.google.com | bash
exec -l $SHELL
Windows:
Download from: https://cloud.google.com/sdk/docs/install
After install, restart your terminal.
```
## Step 3: Authenticate
Run these commands:
```bash
# Login with your Google account
gcloud auth login
# Set up Application Default Credentials (required for MCP)
gcloud auth application-default login
```
Both commands will open a browser for authentication.
## Step 4: Set Default Project
```bash
# List available projects
gcloud projects list
# Set default project
gcloud config set project YOUR_PROJECT_ID
```
## Step 5: Verify Setup
Run: `gcloud auth list`
Should show your authenticated account with asterisk (\*).
## Step 6: Restart Claude Code
Tell the user:
```
After authentication:
1. Exit Claude Code
2. Run `claude` again
The MCP will use your gcloud credentials.
```
## Troubleshooting
If GCloud MCP fails:
```
Common fixes:
1. ADC not found - Run gcloud auth application-default login
2. Project not set - Run gcloud config set project PROJECT_ID
3. Permission denied - Check IAM roles in Cloud Console
4. Quota exceeded - Check quotas in Cloud Console
5. Token expired - Run gcloud auth application-default login again
```
## Alternative: Disable Plugin
If user doesn't need GCloud integration:
```
To disable this plugin:
1. Run /mcp command
2. Find the gcloud-observability server
3. Disable it
This prevents errors from missing authentication.
```

View File

@@ -0,0 +1,148 @@
---
name: gcloud-usage
description: This skill should be used when user asks about "GCloud logs", "Cloud Logging queries", "Google Cloud metrics", "GCP observability", "trace analysis", or "debugging production issues on GCP".
---
# GCP Observability Best Practices
## Structured Logging
### JSON Log Format
Use structured JSON logging for better queryability:
```json
{
"severity": "ERROR",
"message": "Payment failed",
"httpRequest": { "requestMethod": "POST", "requestUrl": "/api/payment" },
"labels": { "user_id": "123", "transaction_id": "abc" },
"timestamp": "2025-01-15T10:30:00Z"
}
```
### Severity Levels
Use appropriate severity for filtering:
- **DEBUG:** Detailed diagnostic info
- **INFO:** Normal operations, milestones
- **NOTICE:** Normal but significant events
- **WARNING:** Potential issues, degraded performance
- **ERROR:** Failures that don't stop the service
- **CRITICAL:** Failures requiring immediate action
- **ALERT:** Person must take action immediately
- **EMERGENCY:** System is unusable
## Log Filtering Queries
### Common Filters
```
# By severity
severity >= WARNING
# By resource
resource.type="cloud_run_revision"
resource.labels.service_name="my-service"
# By time
timestamp >= "2025-01-15T00:00:00Z"
# By text content
textPayload =~ "error.*timeout"
# By JSON field
jsonPayload.user_id = "123"
# Combined
severity >= ERROR AND resource.labels.service_name="api"
```
### Advanced Queries
```
# Regex matching
textPayload =~ "status=[45][0-9]{2}"
# Substring search
textPayload : "connection refused"
# Multiple values
severity = (ERROR OR CRITICAL)
```
## Metrics vs Logs vs Traces
### When to Use Each
**Metrics:** Aggregated numeric data over time
- Request counts, latency percentiles
- Resource utilization (CPU, memory)
- Business KPIs (orders/minute)
**Logs:** Detailed event records
- Error details and stack traces
- Audit trails
- Debugging specific requests
**Traces:** Request flow across services
- Latency breakdown by service
- Identifying bottlenecks
- Distributed system debugging
## Alert Policy Design
### Alert Best Practices
- **Avoid alert fatigue:** Only alert on actionable issues
- **Use multi-condition alerts:** Reduce noise from transient spikes
- **Set appropriate windows:** 5-15 min for most metrics
- **Include runbook links:** Help responders act quickly
### Common Alert Patterns
**Error rate:**
- Condition: Error rate > 1% for 5 minutes
- Good for: Service health monitoring
**Latency:**
- Condition: P99 latency > 2s for 10 minutes
- Good for: Performance degradation detection
**Resource exhaustion:**
- Condition: Memory > 90% for 5 minutes
- Good for: Capacity planning triggers
## Cost Optimization
### Reducing Log Costs
- **Exclusion filters:** Drop verbose logs at ingestion
- **Sampling:** Log only percentage of high-volume events
- **Shorter retention:** Reduce default 30-day retention
- **Downgrade logs:** Route to cheaper storage buckets
### Exclusion Filter Examples
```
# Exclude health checks
resource.type="cloud_run_revision" AND httpRequest.requestUrl="/health"
# Exclude debug logs in production
severity = DEBUG
```
## Debugging Workflow
1. **Start with metrics:** Identify when issues started
2. **Correlate with logs:** Filter logs around problem time
3. **Use traces:** Follow specific requests across services
4. **Check resource logs:** Look for infrastructure issues
5. **Compare baselines:** Check against known-good periods

View File

@@ -0,0 +1,18 @@
---
name: setup
description: This skill should be used when user encounters "ADC not found", "gcloud auth error", "GCloud MCP error", "Application Default Credentials", "project not set", or needs help configuring GCloud integration.
---
# GCloud Tools Setup
Run `/gcloud-tools:setup` to configure GCloud MCP.
## Quick Fixes
- **ADC not found** - Run `gcloud auth application-default login`
- **Project not set** - Run `gcloud config set project PROJECT_ID`
- **Permission denied** - Check IAM roles in Cloud Console
## Don't Need GCloud?
Disable via `/mcp` command to prevent errors.

View File

@@ -0,0 +1,11 @@
{
"name": "general-dev",
"version": "2.0.2",
"description": "General development tools: code-simplifier agent for pattern analysis, rg preference hook.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,144 @@
---
name: code-simplifier
description: |-
Auto-triggers after TodoWrite tool or before Task tool to ensure new code follows existing patterns for imports, function signatures, naming conventions, base class structure, API key handling, and dependency management. Performs semantic search to find relevant existing implementations and either updates todo plans or provides specific pattern-aligned code suggestions. Examples: <example>Context: Todo "Add Stripe payment integration". Agent finds existing payment handlers use `from utils.api_client import APIClient` and `config.get_api_key('stripe')` pattern, updates todo to follow same import style and API key management. <commentary>Maintains consistent import and API key patterns.</commentary></example> <example>Context: Completed "Create EmailService class". Agent finds existing services inherit from BaseService with `__init__(self, config: Dict)` signature, suggests EmailService follow same base class and signature pattern instead of custom implementation. <commentary>Ensures consistent service architecture.</commentary></example> <example>Context: Todo "Build Redis cache manager". Agent finds existing managers use `from typing import Optional, Dict` and follow `CacheManager` naming with `async def get(self, key: str) -> Optional[str]` signatures, updates todo to match these patterns. <commentary>Aligns function signatures and naming conventions.</commentary></example> <example>Context: Completed "Add database migration". Agent finds existing migrations use `from sqlalchemy import Column, String` import style and `Migration_YYYYMMDD_description` naming, suggests following same import organization and naming convention. <commentary>Maintains consistent dependency management and naming.</commentary></example>
tools:
[
"Glob",
"Grep",
"Read",
"WebSearch",
"WebFetch",
"TodoWrite",
"Bash",
"mcp__tavily__tavily_search",
    "mcp__tavily__tavily_extract",
]
color: green
model: inherit
---
You are a **Contextual Pattern Analyzer** that ensures new code follows existing project conventions.
## **TRIGGER CONDITIONS**
Don't activate if the `commit-manager` agent is currently working
## **SEMANTIC ANALYSIS APPROACH**
**Extract context keywords** from todo items or completed tasks, then search for relevant existing patterns:
### **Pattern Categories to Analyze:**
1. **Module Imports**: `from utils.api import APIClient` vs `import requests`
2. **Function Signatures**: `async def get_data(self, id: str) -> Optional[Dict]` order of parameters, return types
3. **Class Naming**: `UserService`, `DataManager`, `BaseValidator`
4. **Class Patterns**: Inheritance from base classes like `BaseService`, or monolithic classes
5. **API Key Handling**: `load_dotenv('VAR_NAME')` vs defined constant in code.
6. **Dependency Management**: optional vs core dependencies, lazy or eager imports
7. **Error Handling**: Try/catch patterns and custom exceptions
8. **Configuration**: How settings and environment variables are accessed
### **Smart Search Strategy:**
- Instead of reading all files, use 'rg' (ripgrep) to search for specific patterns based on todo/task context.
- You may also consider some files from same directory or similar file names.
## **TWO OPERATIONAL MODES**
### **Mode 1: After Todo Creation**
1. **Extract semantic keywords** from todo descriptions
2. **Find existing patterns** using targeted grep searches
3. **Analyze pattern consistency** (imports, naming, structure)
4. **Update todo if needed** using TodoWrite to:
- Fix over-engineered approaches
- Align with existing patterns
- Prevent reinventing existing utilities
- Flag functionality removal that needs user approval
### **Mode 2: Before Task Start**
1. **Identify work context** from existing tasks
2. **Search for similar implementations**
3. **Compare pattern alignment** (signatures, naming, structure)
4. **Revise task if needed**:
- Update plan if naming/importing/signatures/ordering/conditioning patterns don't align with the existing codebase
- Don't create duplicate new functions/classes if similar functionality already exists
- Ensure minimal test cases and error handling is present without overengineering
## **SPECIFIC OUTPUT FORMATS**
### **Todo List Updates:**
```
**PATTERN ANALYSIS:**
Found existing GitHub integration in `src/github_client.py`:
- Uses `from utils.http import HTTPClient` pattern
- API keys via `config.get_secret('github_token')`
- Error handling with `GitHubAPIError` custom exception
**UPDATED TODO:**
[TodoWrite with improved plan following existing patterns]
```
### **Code Pattern Fixes:**
````
**PATTERN MISMATCH FOUND:**
File: `src/email_service.py:10-15`
**Existing Pattern** (from `src/sms_service.py:8`):
```python
from typing import Dict
from config import get_api_key
from utils.base_service import BaseService
class SMSService(BaseService):
def __init__(self, config: Dict):
super().__init__(config)
self.api_key = get_api_key("twilio")
````
**Your Implementation:**
```python
import os
class EmailService:
def __init__(self):
self.key = os.getenv("EMAIL_KEY")
```
**Aligned Fix:**
```python
from typing import Dict
from config import get_api_key
from utils.base_service import BaseService
class EmailService(BaseService):
def __init__(self, config: Dict):
super().__init__(config)
self.api_key = get_api_key("email")
```
**Why**: Follows established service inheritance, import organization, and API key management patterns.
```
## **ANALYSIS WORKFLOW**
1. **Context Extraction** → Keywords from todo/task
2. **Pattern Search** → Find 2-3 most relevant existing files
3. **Consistency Check** → Compare imports, signatures, naming, structure
4. **Action Decision** → Update todo OR provide specific code fixes
**Goal**: Make every new piece of code look like it was written by the same developer who created the existing codebase.
```

View File

@@ -0,0 +1,16 @@
{
"description": "General development hooks for code quality",
"hooks": {
"PreToolUse": [
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/enforce_rg_over_grep.py"
}
]
}
]
}
}

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
"""PreToolUse hook that blocks Bash commands using 'grep' or 'find -name'.

Reads the hook payload (JSON) from stdin, inspects the Bash command string,
and exits with code 2 (which blocks the tool call and surfaces stderr) when
a discouraged command is found. Exits 0 to allow the command.
"""
import json
import re
import sys

# Validation rules as (regex pattern, guidance message) tuples. A rule
# "fires" when its pattern matches the command string.
VALIDATION_RULES = [
    (
        # 'grep' as a standalone word, unless a pipe appears later in the
        # command (piped usage is tolerated by the lookahead).
        r"\bgrep\b(?!.*\|)",
        "Use 'rg' (ripgrep) instead of 'grep' for better performance and features",
    ),
    (
        # 'find <path> -name' style file searches.
        r"\bfind\s+\S+\s+-name\b",
        "Use 'rg --files | rg pattern' or 'rg --files -g pattern' instead of 'find -name' for better performance",
    ),
]


def validate_command(command: str) -> list[str]:
    """Return the guidance message for every rule that matches ``command``."""
    return [message for pattern, message in VALIDATION_RULES if re.search(pattern, command)]


def main() -> None:
    """Read the hook payload from stdin and enforce the validation rules."""
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
        sys.exit(1)

    if not isinstance(input_data, dict):
        # Malformed payload (e.g. a JSON array or scalar); nothing to validate.
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    # Guard against a non-dict tool_input, which would otherwise raise
    # AttributeError on .get and crash the hook.
    command = tool_input.get("command", "") if isinstance(tool_input, dict) else ""

    # Only Bash commands are validated; everything else passes through.
    if tool_name != "Bash" or not command:
        sys.exit(0)

    issues = validate_command(command)
    if issues:
        for message in issues:
            print(message, file=sys.stderr)
        # Exit code 2 blocks the tool call and shows stderr to Claude.
        sys.exit(2)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,11 @@
{
"name": "github-dev",
"version": "2.0.2",
"description": "GitHub and Git workflow tools: commit-creator, pr-creator, and pr-reviewer agents, slash commands for commits and PRs, GitHub MCP integration, plus skills for PR/commit workflows.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,76 @@
---
name: commit-creator
description: |-
Use this agent when you have staged files ready for commit and need intelligent commit planning and execution. Examples: <example>Context: User has staged multiple files with different types of changes and wants to commit them properly. user: 'I've staged several files with bug fixes and new features. Can you help me commit these?' assistant: 'I'll use the commit-creator agent to analyze your staged files, create an optimal commit plan, and handle the commit process.' <commentary>The user has staged files and needs commit assistance, so use the commit-creator agent to handle the entire commit workflow.</commentary></example> <example>Context: User has made changes and wants to ensure proper commit organization. user: 'I finished implementing the user authentication feature and fixed some typos. Everything is staged.' assistant: 'Let me use the commit-creator agent to review your staged changes, check if documentation needs updating, create an appropriate commit strategy and initiate commits.' <commentary>User has completed work and staged files, perfect time to use commit-creator for proper commit planning.</commentary></example>
tools:
[
"Bash",
"BashOutput",
"Glob",
"Grep",
"Read",
"WebSearch",
"WebFetch",
"TodoWrite",
"mcp__tavily__tavily_search",
"mcp__tavily__tavily_extract",
]
color: blue
skills: commit-workflow
model: inherit
---
You are a Git commit workflow manager, an expert in version control best practices and semantic commit organization. Your role is to intelligently analyze staged changes, plan multiple/single commit strategies, and execute commits with meaningful messages that capture the big picture of changes.
When activated, follow this precise workflow:
1. **Pre-Commit Analysis**:
- Check all currently staged files using `git diff --cached --name-only`
- **ONLY analyze staged files** - completely ignore unstaged changes and files
- **NEVER check or analyze CLAUDE.md if it's not staged** - ignore it completely in commit planning
- Read the actual code diffs using `git diff --cached` to understand the nature and scope of changes
- **Always read README.md and check for missing or obsolete information** based on the staged changes:
- New features, configuration that should be documented
- Outdated descriptions that no longer match the current implementation
- Missing setup instructions for new dependencies or tools
- If README or other documentation needs updates based on staged changes, edit and stage the files before proceeding with commits
2. **Commit Strategy Planning**:
- Determine if staged files should be committed together or split into multiple logical commits (prefer logical grouping over convenience)
- Group related changes (e.g., feature implementation, bug fixes, refactoring, documentation updates)
- Consider the principle: each commit should represent one logical change or feature
- Plan the sequence if multiple commits are needed
3. **Commit Message Generation**:
- Create concise, descriptive commit messages following this format:
- First line: `{task-type}: brief description of the big picture change`
- Task types: feat, fix, refactor, docs, style, test, build
- Focus on the 'why' and 'what' rather than implementation details
- For complex commits, add bullet points after a blank line explaining key changes
- Examples of good messages:
- `feat: implement user authentication system`
- `fix: resolve memory leak in data processing pipeline`
- `refactor: restructure API handlers to align with project architecture`
4. **Execution**:
- Execute commits in the planned sequence using git commands
- **For multi-commit scenarios, use precise git operations to avoid file mixups**:
- Create a temporary list of all staged files using `git diff --cached --name-only`
- For each commit, use `git reset HEAD <file>` to unstage specific files not meant for current commit
- Use `git add <file>` to stage only the files intended for the current commit
- After each commit, re-stage remaining files for subsequent commits
- **CRITICAL**: Always verify the exact files in staging area before each `git commit` command
- After committing, push changes to the remote repository
5. **Quality Assurance**:
- Verify each commit was successful
- Confirm push completed without errors
- Provide a summary of what was committed and pushed
Key principles:
- Always read and understand the actual code changes, not just filenames
- Prioritize logical grouping over convenience
- Write commit messages that will be meaningful to future developers
- Ensure documentation stays synchronized with code changes
- Handle git operations safely with proper error checking

View File

@@ -0,0 +1,119 @@
---
name: pr-creator
description: |-
Use this agent when you need to create a complete pull request workflow including branch creation, committing staged changes, and PR submission. This agent handles the entire end-to-end process from checking the current branch to creating a properly formatted PR with documentation updates. Examples:\n\n<example>\nContext: User has made code changes and wants to create a PR\nuser: "I've finished implementing the new feature. Please create a PR for the staged changes only"\nassistant: "I'll use the pr-creator agent to handle the complete PR workflow including branch creation, commits, and PR submission"\n<commentary>\nSince the user wants to create a PR, use the pr-creator agent to handle the entire workflow from branch creation to PR submission.\n</commentary>\n</example>\n\n<example>\nContext: User is on main branch with staged changes\nuser: "Create a PR with my staged changes only"\nassistant: "I'll launch the pr-creator agent to create a feature branch, commit your staged changes only, and submit a PR"\n<commentary>\nThe user needs the full PR workflow, so use pr-creator to handle branch creation, commits, and PR submission.\n</commentary>\n</example>
tools:
[
"Bash",
"BashOutput",
"Glob",
"Grep",
"Read",
"WebSearch",
"WebFetch",
"TodoWrite",
"SlashCommand",
"mcp__tavily__tavily_search",
"mcp__tavily__tavily_extract",
]
color: cyan
skills: pr-workflow, commit-workflow
model: inherit
---
You are a Git and GitHub PR workflow automation specialist. Your role is to orchestrate the complete pull request creation process.
## Workflow Steps:
1. **Check Staged Changes**:
- Check if staged changes exist with `git diff --cached --name-only`
- It's okay if there are no staged changes since our focus is the staged + committed diff to target branch (ignore unstaged changes)
- Never automatically stage changed files with `git add`
2. **Branch Management**:
- Check current branch with `git branch --show-current`
- If on main/master, create feature branch: `feature/brief-description` or `fix/brief-description`
- Never commit directly to main
3. **Commit Staged Changes**:
- Use `github-dev:commit-creator` subagent to handle if any staged changes, skip this step if no staged changes exist, ignore unstaged changes
- Ensure commits follow project conventions
4. **Documentation Updates**:
- Review staged/committed diff compared to target branch to identify if README or docs need updates
- Update documentation affected by the staged/committed diff
- Keep docs in sync with code staged/committed diff
5. **Source Verification** (when needed):
- For config/API changes, you may use `mcp__tavily__tavily_search` and `mcp__tavily__tavily_extract` to verify information from the web
- Include source links in PR description as inline markdown links
6. **Create Pull Request**:
- **IMPORTANT**: Analyze ALL committed changes in the branch using `git diff <base-branch>...HEAD`
- PR message must describe the complete changeset across all commits, not just the latest commit
- Focus on what changed (ignore unstaged changes) from the perspective of someone reviewing the entire branch
- Create PR with `gh pr create` using:
- `-t` or `--title`: Concise title (max 72 chars)
- `-b` or `--body`: Description with brief summary (few words or 1 sentence) + few bullet points of changes
- `-a @me`: Self-assign (confirmation hook will show actual username)
- `-r <reviewer>`: Add reviewer by finding most probable reviewer from recent PRs:
- Get current repo: `gh repo view --json nameWithOwner -q .nameWithOwner`
- First try: `gh pr list --repo <owner>/<repo> --author @me --limit 5` to find PRs by current author
- If no PRs by author, fallback: `gh pr list --repo <owner>/<repo> --limit 5` to get any recent PRs
- Extract reviewer username from the PR list
- Title should start with capital letter and verb and should not start with conventional commit prefixes (e.g. "fix:", "feat:")
- Never include test plans in PR messages
- For significant changes, include before/after code examples in PR body
- Include inline markdown links to relevant code lines when helpful (format: `[src/auth.py:42](src/auth.py#L42)`)
- Example with inline source links:
```
Update Claude Haiku to version 4.5
- Model ID: claude-3-haiku-20240307 → claude-haiku-4-5-20251001 ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
- Pricing: $0.80/$4.00 → $1.00/$5.00 per MTok ([source](https://docs.anthropic.com/en/docs/about-claude/pricing))
- Max output: 4,096 → 64,000 tokens ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
```
- Example with code changes and file links:
````
Refactor authentication to use async context manager
- Replace synchronous auth flow with async/await pattern in [src/auth.py:15-42](src/auth.py#L15-L42)
- Add context manager support for automatic cleanup
Before:
```python
def authenticate(token):
session = create_session(token)
return session
````
After:
```python
async def authenticate(token):
async with create_session(token) as session:
return session
```
```
```
## Tool Usage:
- Use `gh` CLI for all PR operations
- Use `mcp__tavily__tavily_search` for web verification
- Use `github-dev:commit-creator` subagent for commit creation
- Use git commands for branch operations
## Output:
Provide clear status updates:
- Branch creation confirmation
- Commit completion status
- Documentation updates made
- PR URL upon completion

View File

@@ -0,0 +1,77 @@
---
name: pr-reviewer
description: |-
Use this agent when user asks to "review a PR", "review pull request", "review this pr", "code review this PR", "check PR #N", or provides a GitHub PR URL for review. Examples:\n\n<example>\nContext: User wants to review the PR for the current branch\nuser: "review this pr"\nassistant: "I'll use the pr-reviewer agent to find and review the PR associated with the current branch."\n<commentary>\nNo PR number given, agent should auto-detect PR from current branch.\n</commentary>\n</example>\n\n<example>\nContext: User wants to review a specific PR by number\nuser: "Review PR #123 in ultralytics/ultralytics"\nassistant: "I'll use the pr-reviewer agent to analyze the pull request and provide a detailed code review."\n<commentary>\nUser explicitly requests PR review with number and repo, trigger pr-reviewer agent.\n</commentary>\n</example>\n\n<example>\nContext: User provides a GitHub PR URL\nuser: "Can you review https://github.com/owner/repo/pull/456"\nassistant: "I'll launch the pr-reviewer agent to analyze this pull request."\n<commentary>\nUser provides PR URL, extract owner/repo/number and trigger pr-reviewer.\n</commentary>\n</example>
model: inherit
color: blue
tools: ["Read", "Grep", "Glob", "Bash"]
---
You are a code reviewer. Find issues that **require fixes**.
Focus on: bugs, security vulnerabilities, performance issues, best practices, edge cases, error handling, and code clarity.
## Critical Rules
1. **Only report actual issues** - If code is correct, say nothing about it
2. **Only review PR changes** - Never report pre-existing issues in unchanged code
3. **Combine related issues** - Same root cause = single comment
4. **Prioritize**: CRITICAL bugs/security > HIGH impact > code quality
5. **Concise and friendly** - One line per issue, no jargon
6. **Use backticks** for code: `function()`, `file.py`
7. **Skip routine changes**: imports, version updates, standard refactoring
8. **Maximum 8 issues** - Focus on most important
## What NOT to Do
- Never say "The fix is correct" or "handled properly" as findings
- Never list empty severity categories
- Never dump full file contents
- Never report issues with "No change needed"
## Review Process
1. **Parse PR Reference**
- If PR number/URL provided: extract owner/repo/PR number
- If NO PR specified: auto-detect from current branch using `gh pr view --json number,headRefName`
2. **Fetch PR Data**
- `gh pr diff <number>` for changes
- `gh pr view <number> --json files` for file list
3. **Skip Files**: `.lock`, `.min.js/css`, `dist/`, `build/`, `vendor/`, `node_modules/`, `_pb2.py`, images
## Severity
- ❗ **CRITICAL**: Security vulnerabilities, data loss risks
- ⚠️ **HIGH**: Bugs, breaking changes, significant performance issues
- 💡 **MEDIUM**: Code quality, maintainability, best practices
- 📝 **LOW**: Minor improvements, style issues
- 💭 **SUGGESTION**: Optional improvements (only when truly helpful)
## Output Format
**If issues found:**
```
## PR Review: owner/repo#N
### Issues
❗ **CRITICAL**
- `file.py:42` - Description. Fix: suggestion
⚠️ **HIGH**
- `file.py:55` - Description. Fix: suggestion
💡 **MEDIUM**
- `file.py:60` - Description
**Recommendation**: NEEDS_CHANGES
```
**If NO issues found:**
```
APPROVE - No fixes required
```

View File

@@ -0,0 +1,44 @@
---
description: Clean up local branches deleted from remote
---
# Clean Gone Branches
Remove local git branches that have been deleted from remote (marked as [gone]).
## Instructions
Run the following commands in sequence:
1. **Update remote references:**
```bash
git fetch --prune
```
2. **View branches marked as [gone]:**
```bash
git branch -vv
```
3. **List worktrees (if any):**
```bash
git worktree list
```
4. **Remove worktrees for gone branches (if any):**
```bash
git branch -vv | grep '\[gone\]' | sed 's/^[*+]*//' | awk '{print $1}' | while read -r branch; do
worktree=$(git worktree list | grep "\[$branch\]" | awk '{print $1}')
if [ -n "$worktree" ]; then
echo "Removing worktree: $worktree"
git worktree remove --force "$worktree"
fi
done
```
5. **Delete gone branches:**
```bash
git branch -vv | grep '\[gone\]' | sed 's/^[*+]*//' | awk '{print $1}' | xargs -I {} git branch -D {}
```
Report the results: list of removed worktrees and deleted branches, or notify if no [gone] branches exist.

View File

@@ -0,0 +1,19 @@
---
allowed-tools: Task, Read, Grep, SlashCommand
argument-hint: [context]
description: Commit staged changes with optional context
---
# Commit Staged Changes
Use the commit-creator agent to analyze and commit staged changes with intelligent organization and optimal commit strategy.
## Additional Context
$ARGUMENTS
Task(
description: "Analyze and commit staged changes",
prompt: "Analyze the staged changes and create appropriate commits. Additional context: $ARGUMENTS",
subagent_type: "github-dev:commit-creator"
)

View File

@@ -0,0 +1,19 @@
---
allowed-tools: Task, Read, Grep, SlashCommand, Bash(git checkout:*), Bash(git -C:* checkout:*)
argument-hint: [context]
description: Create pull request with optional context
---
# Create Pull Request
Use the pr-creator agent to handle the complete PR workflow including branch creation, commits, and PR submission.
## Additional Context
$ARGUMENTS
Task(
description: "Create pull request",
prompt: "Handle the complete PR workflow including branch creation, commits, and PR submission. Additional context: $ARGUMENTS",
subagent_type: "github-dev:pr-creator"
)

View File

@@ -0,0 +1,19 @@
---
allowed-tools: Task, Read, Grep, Glob
argument-hint: <PR number or URL>
description: Review a pull request for code quality and issues
---
# Review Pull Request
Use the pr-reviewer agent to analyze the pull request and provide a detailed code review.
## PR Reference
$ARGUMENTS
Task(
description: "Review pull request",
prompt: "Review the pull request and provide detailed feedback on code quality, potential bugs, and suggestions. PR reference: $ARGUMENTS",
subagent_type: "github-dev:pr-reviewer"
)

View File

@@ -0,0 +1,53 @@
---
description: Configure GitHub CLI authentication
---
# GitHub CLI Setup
**Source:** [github/github-mcp-server](https://github.com/github/github-mcp-server)
Configure `gh` CLI for GitHub access.
## Step 1: Check Current Status
Run `gh auth status` to check authentication state.
Report status:
- "GitHub CLI is not authenticated - needs login"
- OR "GitHub CLI is authenticated as <username>"
## Step 2: If Not Authenticated
Guide the user:
```
To authenticate with GitHub CLI:
gh auth login
This will open a browser for GitHub OAuth login.
Select: GitHub.com → HTTPS → Login with browser
```
## Step 3: Verify Setup
After login, verify with:
```bash
gh auth status
gh api user --jq '.login'
```
## Troubleshooting
If `gh` commands fail:
```
Common fixes:
1. Check authentication - gh auth status
2. Re-login - gh auth login
3. Missing scopes - re-auth with required permissions
4. Update gh CLI - brew upgrade gh (or equivalent)
5. Token expired - gh auth refresh
```

View File

@@ -0,0 +1,118 @@
# Claude Command: Update PR Summary
Update PR description with automatically generated summary based on complete changeset.
## Usage
```bash
/update-pr-summary <pr_number> # Update PR description
/update-pr-summary 131 # Example: update PR #131
```
## Workflow Steps
1. **Fetch PR Information**:
- Get PR details using `gh pr view <pr_number> --json title,body,baseRefName,headRefName`
- Identify base branch and head branch from PR metadata
2. **Analyze Complete Changeset**:
- **IMPORTANT**: Analyze ALL committed changes in the branch using `git diff <base-branch>...HEAD`
- PR description must describe the complete changeset across all commits, not just the latest commit
- Focus on what changed from the perspective of someone reviewing the entire branch
- Ignore unstaged changes
3. **Generate PR Description**:
- Create brief summary (1 sentence or few words)
- Add few bullet points of key changes
- For significant changes, include before/after code examples in PR body
- Include inline markdown links to relevant code lines when helpful (format: `[src/auth.py:42](src/auth.py#L42)`)
- For config/API changes, use `mcp__tavily__tavily_search` to verify information and include source links inline
- Never include test plans in PR descriptions
4. **Update PR Title** (if needed):
- Title should start with capital letter and verb
- Should NOT start with conventional commit prefixes (e.g. "fix:", "feat:")
5. **Update PR**:
- Use `gh pr edit <pr_number>` with `--body` (and optionally `--title`) to update the PR
- Use HEREDOC for proper formatting:
```bash
gh pr edit <pr_number> --body "$(
cat << 'EOF'
[PR description here]
EOF
)"
```
## PR Description Format
```markdown
[Brief summary in 1 sentence or few words]
- [Key change 1 with inline code reference if helpful]
- [Key change 2 with source link if config/API change]
- [Key change 3]
[Optional: Before/after code examples for significant changes]
```
## Examples
### Example 1: Config/API Change with Source Links
```markdown
Update Claude Haiku to version 4.5
- Model ID: claude-3-haiku-20240307 → claude-haiku-4-5-20251001 ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
- Pricing: $0.80/$4.00 → $1.00/$5.00 per MTok ([source](https://docs.anthropic.com/en/docs/about-claude/pricing))
- Max output: 4,096 → 64,000 tokens ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
```
### Example 2: Code Changes with File Links
````markdown
Refactor authentication to use async context manager
- Replace synchronous auth flow with async/await pattern in [src/auth.py:15-42](src/auth.py#L15-L42)
- Add context manager support for automatic cleanup
Before:
```python
def authenticate(token):
session = create_session(token)
return session
```
After:
```python
async def authenticate(token):
async with create_session(token) as session:
return session
```
````
### Example 3: Simple Feature Addition
```markdown
Add user profile export functionality
- Export user data to JSON format in [src/export.py:45-78](src/export.py#L45-L78)
- Add CLI command `/export-profile` in [src/cli.py:123](src/cli.py#L123)
- Include email, preferences, and activity history in export
```
## Error Handling
**Pre-Analysis Verification**:
- Verify PR exists and is accessible
- Check tool availability (`gh auth status`)
- Confirm authentication status
**Common Issues**:
- Invalid PR number → List available PRs
- Missing tools → Provide setup instructions
- Auth issues → Guide through authentication

View File

@@ -0,0 +1,20 @@
{
"description": "Git workflow confirmation hooks for GitHub operations",
"hooks": {
"PreToolUse": [
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/git_commit_confirm.py"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/gh_pr_create_confirm.py"
}
]
}
]
}
}

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""PreToolUse hook: show confirmation modal before creating GitHub PR via gh CLI."""
import json
import re
import subprocess
import sys
def parse_gh_pr_create(command: str) -> dict[str, str]:
    """Parse gh pr create command to extract PR parameters.

    Args:
        command (str): The gh pr create command string

    Returns:
        (dict): Dictionary with title, body, assignee, reviewer keys
            (empty strings for parameters not present in the command)
    """
    params = {"title": "", "body": "", "assignee": "", "reviewer": ""}
    # Extract title (-t or --title). Match paired quotes so a title like
    # "Don't break" (apostrophe inside double quotes) is captured fully;
    # also accept the --title=value form.
    title_match = re.search(r'(?:-t|--title)(?:\s+|=)(?:"([^"]+)"|\'([^\']+)\')', command)
    if title_match:
        params["title"] = title_match.group(1) or title_match.group(2)
    # Extract body (-b or --body) - handle HEREDOC syntax first, then simple quotes
    heredoc_match = re.search(
        r'(?:-b|--body)(?:\s+|=)"?\$\(cat\s+<<["\']?(\w+)["\']?\s+(.*?)\s+\1\s*\)"?',
        command,
        re.DOTALL,
    )
    if heredoc_match:
        params["body"] = heredoc_match.group(2).strip()
    else:
        # Paired double or single quotes (original form only supported double)
        body_match = re.search(r'(?:-b|--body)(?:\s+|=)(?:"([^"]+)"|\'([^\']+)\')', command)
        if body_match:
            params["body"] = body_match.group(1) or body_match.group(2)
    # Extract assignee (-a or --assignee)
    assignee_match = re.search(r'(?:-a|--assignee)(?:\s+|=)(\S+)', command)
    if assignee_match:
        params["assignee"] = assignee_match.group(1)
    # Extract reviewer (-r or --reviewer)
    reviewer_match = re.search(r'(?:-r|--reviewer)(?:\s+|=)(\S+)', command)
    if reviewer_match:
        params["reviewer"] = reviewer_match.group(1)
    return params
def resolve_username(assignee: str) -> str:
    """Resolve the @me placeholder to the authenticated GitHub username.

    Args:
        assignee (str): Assignee value from command (may be @me)

    Returns:
        (str): Resolved username, or the original value when no resolution
            is needed or the gh lookup fails
    """
    # Only the literal @me placeholder needs resolving; everything else
    # (including an empty string) passes through unchanged.
    if assignee != "@me":
        return assignee
    try:
        lookup = subprocess.run(
            ["gh", "api", "user", "--jq", ".login"],
            capture_output=True,
            text=True,
            timeout=5,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError):
        # gh missing or too slow — fall back to the raw value
        return assignee
    return lookup.stdout.strip() if lookup.returncode == 0 else assignee
def format_confirmation_message(params: dict[str, str]) -> str:
    """Format PR parameters into a readable confirmation message.

    Args:
        params (dict): Dictionary with title, body, assignee, reviewer

    Returns:
        (str): Formatted confirmation message
    """
    # Keep the modal compact: preview at most 500 characters of the body
    body = params["body"]
    if len(body) > 500:
        body = f"{body[:500]}..."
    out = ["📝 Create Pull Request?", "", f"Title: {params['title']}", ""]
    if body:
        out += ["Body:", body, ""]
    # Empty assignee renders as "None"; @me is resolved to the real login
    out.append(f"Assignee: {resolve_username(params['assignee']) if params['assignee'] else 'None'}")
    if params["reviewer"]:
        out.append(f"Reviewer: {params['reviewer']}")
    return "\n".join(out)
# Hook entry point: read the PreToolUse event from stdin, and for
# `gh pr create` Bash commands emit an "ask" decision with a summary modal.
try:
    hook_input = json.load(sys.stdin)
except json.JSONDecodeError as exc:
    print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
    sys.exit(1)

command = hook_input.get("tool_input", {}).get("command", "")
# Ignore everything except Bash invocations of `gh pr create`
if hook_input.get("tool_name", "") != "Bash" or not command.strip().startswith("gh pr create"):
    sys.exit(0)

# Build the confirmation payload and return the "ask" permission decision
print(
    json.dumps(
        {
            "hookSpecificOutput": {
                "hookEventName": "PreToolUse",
                "permissionDecision": "ask",
                "permissionDecisionReason": format_confirmation_message(parse_gh_pr_create(command)),
            }
        }
    )
)
sys.exit(0)

View File

@@ -0,0 +1,162 @@
#!/usr/bin/env python3
"""PreToolUse hook: show confirmation modal before creating git commit."""
import json
import re
import subprocess
import sys
def parse_git_commit_message(command: str) -> "dict[str, str | bool]":
    """Parse git commit command to extract commit message.

    Args:
        command (str): The git commit command string

    Returns:
        (dict): Dictionary with message (str) and is_amend (bool) keys
    """
    params = {"message": "", "is_amend": False}
    # Check for --amend flag
    params["is_amend"] = "--amend" in command
    # Try to extract heredoc format: git commit -m "$(cat <<'EOF' ... EOF)".
    # Accept any delimiter word with optional single/double quoting, not just 'EOF'.
    heredoc_match = re.search(r"<<[\"']?(\w+)[\"']?\s*\n(.*?)\n\1", command, re.DOTALL)
    if heredoc_match:
        params["message"] = heredoc_match.group(2).strip()
        return params
    # Extract -m/--message values; match paired quotes so an apostrophe inside
    # a double-quoted message (e.g. -m "don't crash") is preserved
    simple_matches = re.findall(r'(?:-m|--message)(?:\s+|=)(?:"([^"]+)"|\'([^\']+)\')', command)
    if simple_matches:
        # Join multiple -m flags with double newlines (git treats them as paragraphs)
        params["message"] = "\n\n".join(dq or sq for dq, sq in simple_matches)
        return params
    return params
def get_staged_files() -> tuple[list[str], str]:
    """Get list of staged files and diff stats.

    Returns:
        (tuple): (list of staged file paths, diff stats summary string);
            both empty when git is unavailable or times out
    """

    def _git(args: list[str]) -> subprocess.CompletedProcess:
        # Small wrapper so both git invocations share identical options
        return subprocess.run(args, capture_output=True, text=True, timeout=5)

    try:
        names_proc = _git(["git", "diff", "--cached", "--name-only"])
        stats_proc = _git(["git", "diff", "--cached", "--stat"])
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return [], ""

    files: list[str] = []
    if names_proc.returncode == 0:
        files = [line for line in names_proc.stdout.strip().split("\n") if line]

    stats = ""
    if stats_proc.returncode == 0:
        # The last line of --stat output is the "N files changed..." summary
        stat_lines = stats_proc.stdout.strip().split("\n")
        if stat_lines:
            stats = stat_lines[-1]

    return files, stats
def format_confirmation_message(message: str, is_amend: bool, files: list[str], stats: str) -> str:
    """Format commit parameters into a readable confirmation message.

    Args:
        message (str): Commit message
        is_amend (bool): Whether this is an amend commit
        files (list): List of staged file paths
        stats (str): Diff statistics string

    Returns:
        (str): Formatted confirmation message
    """
    # Header depends on whether this amends the previous commit
    out = ["💾 Amend Previous Commit?" if is_amend else "💾 Create Commit?", ""]
    # Commit message section (skipped when no message was parsed)
    if message:
        out += ["Message:", message, ""]
    # Staged files section, capped at 15 entries to keep the modal short
    if files:
        out.append(f"Files to be committed ({len(files)}):")
        out += [f"- {path}" for path in files[:15]]
        hidden = len(files) - 15
        if hidden > 0:
            out.append(f"... and {hidden} more files")
        out.append("")
    # Diff summary section
    if stats:
        out += ["Stats:", stats]
    # Warn when nothing is staged — the commit would fail or be empty
    if not files:
        out.append("⚠️ No files staged for commit")
    return "\n".join(out)
# Hook entry point: read the PreToolUse event from stdin, and for
# `git commit` Bash commands emit an "ask" decision with a summary modal.
try:
    hook_input = json.load(sys.stdin)
except json.JSONDecodeError as exc:
    print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
    sys.exit(1)

command = hook_input.get("tool_input", {}).get("command", "")
# Ignore everything except Bash invocations of `git commit`
if hook_input.get("tool_name", "") != "Bash" or not command.strip().startswith("git commit"):
    sys.exit(0)

# Gather the commit message plus the currently staged files and diff summary
parsed = parse_git_commit_message(command)
staged_files, diff_stats = get_staged_files()

# Build the confirmation payload and return the "ask" permission decision
print(
    json.dumps(
        {
            "hookSpecificOutput": {
                "hookEventName": "PreToolUse",
                "permissionDecision": "ask",
                "permissionDecisionReason": format_confirmation_message(
                    parsed["message"], parsed["is_amend"], staged_files, diff_stats
                ),
            }
        }
    )
)
sys.exit(0)

View File

@@ -0,0 +1,51 @@
---
name: commit-workflow
description: This skill should be used when user asks to "commit these changes", "write commit message", "stage and commit", "create a commit", "commit staged files", or runs /commit-staged or /commit-creator commands.
---
# Commit Workflow
Complete workflow for creating commits following project standards.
## Process
1. **Use commit-creator agent**
- Run `/commit-staged [context]` for automated commit handling
- Or follow manual steps below
2. **Analyze staged files only**
- Check all staged files: `git diff --cached --name-only`
- Read diffs: `git diff --cached`
- Completely ignore unstaged changes
3. **Commit message format**
- First line: `{task-type}: brief description of the big picture change`
- Task types: `feat`, `fix`, `refactor`, `docs`, `style`, `test`, `build`
- Focus on 'why' and 'what', not implementation details
- For complex changes, add bullet points after blank line
4. **Message examples**
- `feat: implement user authentication system`
- `fix: resolve memory leak in data processing pipeline`
- `refactor: restructure API handlers to align with project architecture`
5. **Documentation update**
- Check README.md for:
- New features that should be documented
- Outdated descriptions no longer matching implementation
- Missing setup instructions for new dependencies
- Update as needed based on staged changes
6. **Execution**
- Commit uses HEREDOC syntax for proper formatting
- Verify commit message has correct format
- Don't add test plans to commit messages
## Best Practices
- Analyze staged files before writing message
- Keep first line concise (50 chars recommended)
- Use active voice in message
- Reference related code if helpful
- One logical change per commit
- Ensure README reflects implementation

View File

@@ -0,0 +1,73 @@
---
name: pr-workflow
description: This skill should be used when user asks to "create a PR", "make a pull request", "open PR for this branch", "submit changes as PR", "push and create PR", or runs /create-pr or /pr-creator commands.
---
# Pull Request Workflow
Complete workflow for creating pull requests following project standards.
## Process
1. **Verify staged changes** exist with `git diff --cached --name-only`
2. **Branch setup**
- If on main/master, create feature branch first: `feature/brief-description` or `fix/brief-description`
- Use `github-dev:commit-creator` subagent to handle staged changes if needed
3. **Documentation check**
- Update README.md or docs based on changes compared to target branch
- For config/API changes, use `mcp__tavily__tavily_search` to verify info and include sources
4. **Analyze all commits**
- Use `git diff <base-branch>...HEAD` to review complete changeset
- PR message must describe all commits, not just latest
- Focus on what changed from reviewer perspective
5. **Create PR**
- Use `/pr-creator` agent or `gh pr create` with parameters:
- `-t` (title): Start with capital letter, use verb, NO "fix:" or "feat:" prefix
- `-b` (body): Brief summary + bullet points with inline markdown links
- `-a @me` (self-assign)
- `-r <reviewer>`: Find via `gh pr list --repo <owner>/<repo> --author @me --limit 5`
6. **PR Body Guidelines**
- **Summary**: Few words or 1 sentence describing changes
- **Changes**: Bullet points with inline links `[src/auth.py:42](src/auth.py#L42)`
- **Examples**: For significant changes, include before/after code examples
- **No test plans**: Never mention test procedures in PR
## Examples
### With inline source links:
```
Update Claude Haiku to version 4.5
- Model ID: claude-3-haiku-20240307 → claude-haiku-4-5-20251001 ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
- Pricing: $0.80/$4.00 → $1.00/$5.00 per MTok ([source](https://docs.anthropic.com/en/docs/about-claude/pricing))
- Max output: 4,096 → 64,000 tokens ([source](https://docs.anthropic.com/en/docs/about-claude/models/overview))
```
### With code changes:
```
Refactor authentication to use async context manager
- Replace synchronous auth flow with async/await pattern in [src/auth.py:15-42](src/auth.py#L15-L42)
- Add context manager support for automatic cleanup
Before:
\`\`\`python
def authenticate(token):
session = create_session(token)
return session
\`\`\`
After:
\`\`\`python
async def authenticate(token):
async with create_session(token) as session:
return session
\`\`\`
```

View File

@@ -0,0 +1,32 @@
---
name: setup
description: This skill should be used when the user asks "how to setup GitHub CLI", "configure gh", "gh auth not working", "GitHub CLI connection failed", "gh CLI error", or needs help with GitHub authentication.
---
# GitHub CLI Setup
Configure `gh` CLI for GitHub access.
## Quick Setup
```bash
gh auth login
```
Select: GitHub.com → HTTPS → Login with browser
## Verify Authentication
```bash
gh auth status
gh api user --jq '.login'
```
## Troubleshooting
If `gh` commands fail:
1. **Check authentication** - `gh auth status`
2. **Re-login if needed** - `gh auth login`
3. **Check scopes** - Ensure token has repo access
4. **Update gh** - `brew upgrade gh` or equivalent

View File

@@ -0,0 +1,11 @@
{
"name": "linear-tools",
"version": "2.0.2",
"description": "Linear MCP integration for issue tracking with workflow best practices skill.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,6 @@
{
"linear": {
"type": "sse",
"url": "https://mcp.linear.app/sse"
}
}

View File

@@ -0,0 +1,74 @@
---
description: Configure Linear OAuth authentication
---
# Linear Tools Setup
**Source:** [Linear MCP Docs](https://linear.app/docs/mcp)
Check Linear MCP status and configure OAuth if needed.
## Step 1: Test Current Setup
Try listing teams using `mcp__linear__list_teams`.
If successful: Tell user Linear is configured and working.
If fails with authentication error: Continue to Step 2.
## Step 2: OAuth Authentication
Linear uses OAuth - no API keys needed. Tell the user:
```
Linear MCP uses OAuth authentication.
To authenticate:
1. Run the /mcp command in Claude Code
2. Find the "linear" server in the list
3. Click "Authenticate" or similar option
4. A browser window will open
5. Sign in to Linear and authorize access
```
## Step 3: Complete OAuth Flow
After user clicks authenticate:
- Browser opens to Linear authorization page
- User signs in with their Linear account
- User approves the permission request
- Browser shows success message
- Claude Code receives the token automatically
## Step 4: Verify Setup
Try listing teams again using `mcp__linear__list_teams`.
If successful: Linear is now configured.
## Troubleshooting
If OAuth fails:
```
Common fixes:
1. Clear browser cookies for linear.app
2. Try a different browser
3. Disable browser extensions
4. Re-run /mcp and authenticate again
5. Restart Claude Code and try again
```
## Alternative: Disable Plugin
If user doesn't need Linear integration:
```
To disable this plugin:
1. Run /mcp command
2. Find the linear server
3. Disable it
This prevents errors from missing authentication.
```

View File

@@ -0,0 +1,181 @@
---
name: linear-usage
description: This skill should be used when user asks about "Linear issues", "issue tracking best practices", "sprint planning", "Linear project management", or "creating Linear issues".
---
# Linear & Issue Tracking Best Practices
## Issue Writing Guidelines
### Clear Titles
Write titles that describe the problem or outcome:
- **Good:** "Users can't reset password on mobile Safari"
- **Bad:** "Password bug"
- **Good:** "Add export to CSV for user reports"
- **Bad:** "Export feature"
### Effective Descriptions
Include:
1. **Context:** Why this matters
2. **Current behavior:** What happens now (for bugs)
3. **Expected behavior:** What should happen
4. **Steps to reproduce:** For bugs
5. **Acceptance criteria:** Definition of done
### Templates
**Bug report:**
```
## Description
Brief description of the issue.
## Steps to Reproduce
1. Step one
2. Step two
3. Issue occurs
## Expected Behavior
What should happen.
## Actual Behavior
What happens instead.
## Environment
- Browser/OS
- User type
```
**Feature request:**
```
## Problem Statement
What problem does this solve?
## Proposed Solution
High-level approach.
## Acceptance Criteria
- [ ] Criterion 1
- [ ] Criterion 2
```
## Label Taxonomy
### Recommended Labels
**Type labels:**
- `bug` - Something isn't working
- `feature` - New functionality
- `improvement` - Enhancement to existing feature
- `chore` - Maintenance, refactoring
**Area labels:**
- `frontend`, `backend`, `api`, `mobile`
- Or by feature area: `auth`, `payments`, `onboarding`
**Status labels (if not using workflow states):**
- `needs-triage`, `blocked`, `needs-design`
### Label Best Practices
- Keep label count manageable (15-25 total)
- Use consistent naming convention
- Color-code by category
- Review and prune quarterly
## Priority and Estimation
### Priority Levels
- **Urgent (P0):** Production down, security issue
- **High (P1):** Major functionality broken, key deadline
- **Medium (P2):** Important but not urgent
- **Low (P3):** Nice to have, minor improvements
### Estimation Tips
- Use relative sizing (points) not hours
- Estimate complexity, not time
- Include testing and review time
- Re-estimate if scope changes significantly
## Cycle/Sprint Planning
### Cycle Best Practices
- **Duration:** 1-2 weeks typically
- **Capacity:** Plan for 70-80% to allow for interrupts
- **Carryover:** Review why items didn't complete
- **Retrospective:** Brief review at cycle end
### Planning Process
1. Review backlog priorities
2. Pull issues into cycle
3. Break down large items (>5 points)
4. Assign owners
5. Identify dependencies and blockers
## Project Organization
### Projects vs Initiatives
**Projects:** Focused, time-bound work (1-3 months)
- Single team typically
- Clear deliverable
- Example: "Mobile app v2 launch"
**Initiatives:** Strategic themes
- May span multiple projects
- Longer-term goals
- Example: "Platform reliability"
### Roadmap Tips
- Keep roadmap items high-level
- Update status regularly
- Link to detailed issues/projects
- Share with stakeholders
## Triage Workflows
### Triage Process
1. **Review new issues daily**
2. **Add missing information** (labels, priority)
3. **Assign to appropriate team/person**
4. **Link related issues**
5. **Move to backlog or close if invalid**
### Closing Issues
Close with clear reason:
- **Completed:** Work is done
- **Duplicate:** Link to original
- **Won't fix:** Explain why
- **Invalid:** Missing info, not reproducible
## GitHub Integration
### Linking PRs to Issues
- Reference Linear issue ID in PR title or description
- Linear auto-links and updates status
- Use branch names with issue ID for automatic linking
### Workflow Automation
- PR opened → Issue moves to "In Progress"
- PR merged → Issue moves to "Done"
- Configure in Linear settings

View File

@@ -0,0 +1,18 @@
---
name: setup
description: This skill should be used when user encounters "Linear auth failed", "Linear OAuth error", "Linear MCP error", "Linear not working", "unauthorized", or needs help configuring Linear integration.
---
# Linear Tools Setup
Run `/linear-tools:setup` to configure Linear MCP.
## Quick Fixes
- **OAuth failed** - Re-authenticate via `/mcp` command
- **Unauthorized** - Check Linear workspace permissions
- **Token expired** - Re-run OAuth flow
## Don't Need Linear?
Disable via `/mcp` command to prevent errors.

View File

@@ -0,0 +1,11 @@
{
"name": "mongodb-tools",
"version": "2.0.3",
"description": "MongoDB MCP integration (read-only) for database exploration with best practices skill.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,10 @@
{
"mongodb": {
"command": "npx",
"args": ["-y", "mongodb-mcp-server"],
"env": {
"MDB_MCP_CONNECTION_STRING": "REPLACE_WITH_CONNECTION_STRING",
"MDB_MCP_READ_ONLY": "true"
}
}
}

View File

@@ -0,0 +1,112 @@
---
description: Configure MongoDB MCP connection
---
# MongoDB Tools Setup
**Source:** [mongodb-js/mongodb-mcp-server](https://github.com/mongodb-js/mongodb-mcp-server)
Configure the MongoDB MCP server with your connection string.
## Step 1: Check Current Status
Read the MCP configuration from `${CLAUDE_PLUGIN_ROOT}/.mcp.json`.
Check if MongoDB is configured:
- If `mongodb.env.MDB_MCP_CONNECTION_STRING` contains `REPLACE_WITH_CONNECTION_STRING`, it needs configuration
- If it contains a value starting with `mongodb://` or `mongodb+srv://`, already configured
Report status:
- "MongoDB MCP is not configured - needs a connection string"
- OR "MongoDB MCP is already configured"
## Step 2: Show Setup Guide
Tell the user:
```
To configure MongoDB MCP, you need a connection string.
Formats:
- Atlas: mongodb+srv://username:password@cluster.mongodb.net/database
- Local: mongodb://localhost:27017/database
Get Atlas connection string:
1. Go to cloud.mongodb.com
2. Navigate to your cluster
3. Click "Connect" → "Drivers"
4. Copy connection string
Note: MCP runs in READ-ONLY mode.
Don't need MongoDB MCP? Disable it via /mcp command.
```
## Step 3: Ask for Connection String
Use AskUserQuestion:
- question: "Do you have your MongoDB connection string ready?"
- header: "MongoDB"
- options:
- label: "Yes, I have it"
description: "I have my MongoDB connection string ready to paste"
- label: "No, skip for now"
description: "I'll configure it later"
If user selects "No, skip for now":
- Tell them they can run `/mongodb-tools:setup` again when ready
- Remind them they can disable MongoDB MCP via `/mcp` if not needed
- Exit
If user selects "Yes" or provides connection string via "Other":
- If they provided connection string in "Other" response, use that
- Otherwise, ask them to paste the connection string
## Step 4: Validate Connection String
Validate the provided connection string:
- Must start with `mongodb://` or `mongodb+srv://`
If invalid:
- Show error: "Invalid connection string format. Must start with 'mongodb://' or 'mongodb+srv://'"
- Ask if they want to try again or skip
## Step 5: Update Configuration
1. Read current `${CLAUDE_PLUGIN_ROOT}/.mcp.json`
2. Create backup at `${CLAUDE_PLUGIN_ROOT}/.mcp.json.backup`
3. Update `mongodb.env.MDB_MCP_CONNECTION_STRING` value to the actual connection string
4. Write updated configuration back to `${CLAUDE_PLUGIN_ROOT}/.mcp.json`
## Step 6: Confirm Success
Tell the user:
```
MongoDB MCP configured successfully!
IMPORTANT: Restart Claude Code for changes to take effect.
- Exit Claude Code
- Run `claude` again
To verify after restart, run /mcp and check that 'mongodb' server is connected.
```
## Troubleshooting
If MongoDB MCP fails after configuration:
```
Common fixes:
1. Authentication failed - Add ?authSource=admin to connection string
2. Network timeout - Whitelist IP in Atlas Network Access settings
3. Wrong credentials - Verify username/password, special chars need URL encoding
4. SSL/TLS errors - For Atlas, ensure mongodb+srv:// is used
```

View File

@@ -0,0 +1,112 @@
---
name: mongodb-usage
description: This skill should be used when user asks to "query MongoDB", "show database collections", "get collection schema", "list MongoDB databases", "search records in MongoDB", or "check database indexes".
---
# MongoDB Best Practices
## MCP Limitation
**This MCP operates in READ-ONLY mode.** No write, update, or delete operations are possible.
## Schema Design Patterns
### Embedding vs Referencing
**Embed when:**
- Data is accessed together frequently
- Child documents are bounded (won't grow unbounded)
- One-to-few relationships
- Data doesn't change frequently
**Reference when:**
- Data is accessed independently
- Many-to-many relationships
- Documents would exceed 16MB limit
- Frequent updates to referenced data
### Common Patterns
**Subset pattern:** Store frequently accessed subset in parent, full data in separate collection.
**Bucket pattern:** Group time-series data into buckets (e.g., hourly readings in one document).
**Computed pattern:** Store pre-computed values for expensive calculations.
## Index Strategies
### Index Guidelines
- Index fields used in queries, sorts, and aggregation $match stages
- Compound indexes support queries on prefix fields
- Covered queries (all fields in index) are fastest
- Too many indexes slow writes
### Index Types
- **Single field:** Basic index on one field
- **Compound:** Multiple fields, order matters for queries
- **Multikey:** Automatically created for array fields
- **Text:** Full-text search on string content
- **TTL:** Auto-expire documents after time period
### ESR Rule
For compound indexes, order fields by:
1. **E**quality (exact match fields)
2. **S**ort (sort order fields)
3. **R**ange (range query fields like $gt, $lt)
## Aggregation Pipeline
### Performance Tips
- Put `$match` and `$project` early to reduce documents
- Use `$limit` early when possible
- Avoid `$lookup` on large collections without indexes
- Use `$facet` for multiple aggregations in one query
### Common Stages
```javascript
// Filter documents
{ $match: { status: "active" } }
// Reshape documents
{ $project: { name: 1, total: { $sum: "$items.price" } } }
// Group and aggregate
{ $group: { _id: "$category", count: { $sum: 1 } } }
// Sort results
{ $sort: { count: -1 } }
// Join collections
{ $lookup: { from: "orders", localField: "_id", foreignField: "userId", as: "orders" } }
```
## Connection Best Practices
### Connection String Formats
- **Atlas:** `mongodb+srv://user:pass@cluster.mongodb.net/database`
- **Local:** `mongodb://localhost:27017/database`
- **Replica set:** `mongodb://host1,host2,host3/database?replicaSet=rs0`
### Connection Pooling
- Use connection pooling in applications (default in drivers)
- Set appropriate pool size for your workload
- Don't create new connections per request
## Anti-Patterns to Avoid
- **Unbounded arrays:** Arrays that grow without limit
- **Massive documents:** Documents approaching 16MB
- **Too many collections:** Use embedding instead
- **Missing indexes:** Queries doing collection scans
- **$where operator:** Use aggregation instead for security
- **Storing files in documents:** Use GridFS for large files

View File

@@ -0,0 +1,18 @@
---
name: setup
description: This skill should be used when user encounters "MongoDB connection failed", "authentication failed", "MongoDB MCP error", "connection string invalid", "authSource error", or needs help configuring MongoDB integration.
---
# MongoDB Tools Setup
Run `/mongodb-tools:setup` to configure MongoDB MCP.
## Quick Fixes
- **Authentication failed** - Add `?authSource=admin` to connection string
- **Invalid connection string** - Use `mongodb://` or `mongodb+srv://` prefix
- **Network timeout** - Whitelist IP in Atlas Network Access
## Don't Need MongoDB?
Disable via `/mcp` command to prevent errors.

View File

@@ -0,0 +1,11 @@
{
"name": "notification-tools",
"version": "2.0.2",
"description": "Desktop notifications when Claude Code completes tasks. Supports macOS and Linux.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,16 @@
{
"description": "OS notifications on Claude Code events",
"hooks": {
"Notification": [
{
"matcher": "",
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/notify.sh"
}
]
}
]
}
}

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Claude Code Notification hook: reads the hook event JSON from stdin and
# surfaces its "message" field as an OS notification (macOS / Linux), plus a
# terminal bell so editors like VSCode show a visual bell icon.

# Read JSON input from Claude Code hook
input=$(cat)

# Extract the "message" value. Prefer jq for robust JSON parsing when it is
# installed; otherwise fall back to the basic grep parse (which assumes a
# compact `"message":"..."` form and breaks on escaped quotes).
if command -v jq &> /dev/null; then
  message=$(echo "$input" | jq -r '.message // empty')
else
  message=$(echo "$input" | grep -o '"message":"[^"]*"' | cut -d'"' -f4)
fi

# Fall back to a generic message so the notification is never empty when
# extraction fails (e.g. unexpected JSON shape).
message=${message:-"Task complete"}
title="Claude Code"

# Terminal bell - triggers VSCode visual bell icon
printf '\a'

# Send OS notification
if [[ "$OSTYPE" == "darwin"* ]]; then
  # Escape backslashes and double quotes so the message cannot break out of
  # the AppleScript string literal passed to osascript.
  safe_message=${message//\\/\\\\}
  safe_message=${safe_message//\"/\\\"}
  osascript -e "display notification \"${safe_message}\" with title \"${title}\" sound name \"Glass\""
elif command -v notify-send &> /dev/null; then
  notify-send "${title}" "${message}" -u normal -i terminal
fi

View File

@@ -0,0 +1,11 @@
{
"name": "paper-search-tools",
"version": "2.0.2",
"description": "Academic paper search MCP for arXiv, PubMed, IEEE, Scopus, ACM, and more. Requires Docker.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,6 @@
{
"paper-search": {
"command": "docker",
"args": ["run", "-i", "--rm", "mcp/paper-search"]
}
}

View File

@@ -0,0 +1,62 @@
---
description: Configure Paper Search MCP (requires Docker)
---
# Paper Search Tools Setup
**Source:** [mcp/paper-search](https://hub.docker.com/r/mcp/paper-search)
Configure the Paper Search MCP server. Requires Docker.
## Step 1: Check Docker Installation
Run `docker --version` to check if Docker is installed.
If Docker is not installed, show:
```
Docker is required for Paper Search MCP.
Install Docker:
macOS: brew install --cask docker
Linux: curl -fsSL https://get.docker.com | sh
Windows: winget install Docker.DockerDesktop
After installation, start Docker Desktop and wait for it to fully launch.
```
## Step 2: Verify Docker is Running
Run `docker info` to verify Docker daemon is running.
If not running, tell user:
```
Docker is installed but not running.
Start Docker Desktop and wait for it to fully launch before continuing.
```
## Step 3: Pull the Image
Run `docker pull mcp/paper-search` to download the MCP image.
Report progress:
- "Pulling paper-search image..."
- "Image ready!"
## Step 4: Confirm Success
Tell the user:
```
Paper Search MCP configured successfully!
IMPORTANT: Restart Claude Code for changes to take effect.
- Exit Claude Code
- Run `claude` again
To verify after restart, run /mcp and check that 'paper-search' server is connected.
```

View File

@@ -0,0 +1,27 @@
---
name: paper-search-usage
description: This skill should be used when user asks to "search for papers", "find research papers", "search arXiv", "search PubMed", "find academic papers", "search IEEE", "search Scopus", or "look up scientific literature".
---
# Paper Search MCP
Search academic papers across multiple platforms.
## Supported Platforms
- arXiv (preprints)
- PubMed (biomedical)
- IEEE Xplore (engineering)
- Scopus (multidisciplinary)
- ACM Digital Library (computer science)
- Semantic Scholar (AI-powered)
## Usage
Use `mcp__paper-search__*` tools to search papers by keywords, authors, or topics.
## Best Practices
- Start with broad searches, then narrow down
- Use platform-specific searches for domain-specific papers
- Combine multiple sources for comprehensive literature reviews

View File

@@ -0,0 +1,18 @@
---
name: setup
description: This skill should be used when user encounters "paper-search MCP error", "Docker not found", "Docker not running", "paper search not working", or needs help configuring paper search integration.
---
# Paper Search Tools Setup
Run `/paper-search-tools:setup` to configure Paper Search MCP.
## Quick Fixes
- **Docker not found** - Install Docker (see setup command)
- **Docker not running** - Start Docker Desktop
- **Connection failed** - Restart Claude Code after Docker starts
## Don't Need Paper Search?
Disable via `/mcp` command to prevent errors.

View File

@@ -0,0 +1,11 @@
{
"name": "playwright-tools",
"version": "2.0.3",
"description": "Playwright browser automation with E2E testing skill and responsive design testing agent.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugins",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,6 @@
{
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest"]
}
}

View File

@@ -0,0 +1,175 @@
---
name: responsive-tester
description: |
Use this agent when user asks to "test responsiveness", "check responsive design", "test viewport sizes", "test mobile layout", "test desktop layout", "check breakpoints", "responsive testing", or wants to verify components look correct across different screen widths.
<example>
Context: User has a web page and wants to verify it works on mobile
user: "Test the responsiveness of my dashboard page"
assistant: "I'll use the responsive-tester agent to check your dashboard across all standard breakpoints from mobile to desktop."
<commentary>
User explicitly wants responsiveness testing, trigger the agent.
</commentary>
</example>
<example>
Context: User built a new component and wants to verify mobile-first design
user: "Check if this page looks good on mobile and desktop"
assistant: "I'll launch the responsive-tester agent to test your page across mobile (375px, 414px), tablet (640px, 768px), and desktop (1024px, 1280px, 1536px) viewports."
<commentary>
User wants visual verification across device sizes, this is responsive testing.
</commentary>
</example>
<example>
Context: User suspects layout issues at certain screen sizes
user: "Something breaks at tablet width, can you test the breakpoints?"
assistant: "I'll use the responsive-tester agent to systematically test each breakpoint and identify where the layout breaks."
<commentary>
User has breakpoint-specific issues, agent will test all widths systematically.
</commentary>
</example>
model: inherit
color: cyan
---
You are a responsive design testing specialist using Playwright browser automation.
**Core Responsibilities:**
1. Test web pages across standard viewport breakpoints
2. Identify layout issues, overflow problems, and responsive failures
3. Verify mobile-first design patterns are correctly implemented
4. Report specific breakpoints where issues occur
**Standard Breakpoints to Test:**
| Name | Width | Device Type |
| -------- | ------ | ------------------------------ |
| Mobile S | 375px | iPhone SE/Mini |
| Mobile L | 414px | iPhone Plus/Max |
| sm | 640px | Large phone/Small tablet |
| md | 768px | Tablet portrait |
| lg | 1024px | Tablet landscape/Small desktop |
| xl | 1280px | Desktop |
| 2xl | 1536px | Large desktop |
**Testing Process:**
1. Navigate to target URL using `browser_navigate`
2. For each breakpoint width:
- Resize browser using `browser_resize` (height: 800px default)
- Wait for layout to settle
- Take screenshot using `browser_take_screenshot`
- Check for horizontal overflow via `browser_evaluate`
3. Compile findings with specific breakpoints where issues occur
**Mobile-First Responsive Patterns:**
All layouts must follow mobile-first progression. Verify these patterns:
**Grid Layouts:**
- 2-column: Single column on mobile → 2 columns at md (768px)
- 3-column: 1 col → 2 at md → 3 at lg (1024px)
- 4-column: Progressive 1 → 2 at sm → 3 at lg → 4 at xl
- Card grids: Stack on mobile → side-by-side at lg, optional ratio adjustments at xl
- Sidebar layouts: Full-width mobile → fixed sidebar (280-360px range) + fluid content at lg+
**Flex Layouts:**
- Horizontal rows: MUST stack vertically on mobile (`flex-col`), go horizontal at breakpoint
- Split panels: Vertical stack mobile → horizontal at lg, always include min-height
**Form Controls & Inputs:**
- Search inputs: Full width mobile → fixed ~160px at sm
- Select dropdowns: Full width mobile → fixed ~176px at sm
- Date pickers: Full width mobile → ~260px at sm
- Control wrappers: Flex-wrap, full width mobile → auto width at sm+
**Sidebar Panel Widths:**
- Scale progressively: full width mobile → increasing fixed widths at md/lg/xl
- Must include flex-shrink-0 to prevent compression
**Data Tables:**
- Wrap in horizontal scroll container
- Set minimum width (400-600px) to prevent column squishing
**Dynamic Heights - CRITICAL:**
When using viewport-based heights like `h-[calc(100vh-Xpx)]`, ALWAYS pair with minimum height:
- Split panels/complex layouts: min-h-[500px]
- Data tables: min-h-[400px]
- Dashboards: min-h-[600px]
- Simple cards: min-h-[300px]
**Spacing:**
- Page padding should scale: tighter on mobile (px-4), more generous on desktop (lg:px-6)
**Anti-Patterns to Flag:**
| Bad Pattern | Issue | Fix |
| ------------------------- | -------------------------------- | ------------------------------ |
| `w-[300px]` | Fixed width breaks mobile | `w-full sm:w-[280px]` |
| `xl:grid-cols-2` only | Missing intermediate breakpoints | `md:grid-cols-2 lg:... xl:...` |
| `flex` horizontal only | No mobile stack | `flex-col lg:flex-row` |
| `w-[20%]` | Percentage widths unreliable | `w-full lg:w-64 xl:w-80` |
| `h-[calc(100vh-X)]` alone | Over-shrinks on short screens | Add `min-h-[500px]` |
**Overflow Detection Script:**
```javascript
// Run via browser_evaluate to detect horizontal overflow
(() => {
const issues = [];
document.querySelectorAll("*").forEach((el) => {
if (el.scrollWidth > el.clientWidth) {
issues.push({
element:
el.tagName + (el.className ? "." + el.className.split(" ")[0] : ""),
overflow: el.scrollWidth - el.clientWidth,
});
}
});
return issues.length ? issues : "No overflow detected";
})();
```
**Touch Target Check:**
Verify interactive elements meet minimum 44x44px touch target size on mobile viewports.
**Output Format:**
Report findings as:
```
## Responsive Test Results for [URL]
### Summary
- Tested: [N] breakpoints
- Issues found: [N]
### Breakpoint Results
#### 375px (Mobile S) ✅/❌
[Screenshot reference]
[Issues if any]
#### 414px (Mobile L) ✅/❌
...
### Issues Found
1. [Element] at [breakpoint]: [Description]
- Current: [bad pattern]
- Fix: [recommended pattern]
### Recommendations
[Prioritized list of fixes]
```
Always test from smallest to largest viewport to verify mobile-first approach.

View File

@@ -0,0 +1,104 @@
---
description: Configure Playwright MCP
---
# Playwright Tools Setup
**Source:** [microsoft/playwright-mcp](https://github.com/microsoft/playwright-mcp)
Check Playwright MCP status and configure browser dependencies if needed.
## Step 1: Test Current Setup
Run `/mcp` command to check if playwright server is listed and connected.
If playwright server shows as connected: Tell user Playwright is configured and working.
If playwright server is missing or shows connection error: Continue to Step 2.
## Step 2: Browser Installation
Tell the user:
```
Playwright MCP requires browser binaries. Install them with:
npx playwright install
This installs Chromium, Firefox, and WebKit browsers.
For a specific browser only:
npx playwright install chromium
npx playwright install firefox
npx playwright install webkit
```
## Step 3: Browser Options
The MCP server supports these browsers via the `--browser` flag in `.mcp.json`:
- `chrome` (default)
- `firefox`
- `webkit`
- `msedge`
Example `.mcp.json` for Firefox:
```json
{
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--browser", "firefox"]
}
}
```
## Step 4: Headless Mode
For headless operation (no visible browser), add `--headless`:
```json
{
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"]
}
}
```
## Step 5: Restart
Tell the user:
```
After making changes:
1. Exit Claude Code
2. Run `claude` again
Changes take effect after restart.
```
## Troubleshooting
If Playwright MCP fails:
```
Common fixes:
1. Browser not found - Run `npx playwright install`
2. Permission denied - Check file permissions on browser binaries
3. Display issues - Use `--headless` flag for headless mode
4. Timeout errors - Increase timeout with `--timeout-navigation 120000`
```
## Alternative: Disable Plugin
If user doesn't need browser automation:
```
To disable this plugin:
1. Run /mcp command
2. Find the playwright server
3. Disable it
This prevents errors from missing browser binaries.
```

View File

@@ -0,0 +1,333 @@
---
name: playwright-testing
description: This skill should be used when user asks about "Playwright", "responsiveness test", "test with playwright", "test login flow", "file upload test", "handle authentication in tests", or "fix flaky tests".
---
# Playwright Testing Best Practices
## Test Organization
### File Structure
```
tests/
├── auth/
│ ├── login.spec.ts
│ └── signup.spec.ts
├── dashboard/
│ └── dashboard.spec.ts
├── fixtures/
│ └── test-data.ts
├── pages/
│ └── login.page.ts
└── playwright.config.ts
```
### Naming Conventions
- Files: `feature-name.spec.ts`
- Tests: Describe user behavior, not implementation
- Good: `test('user can reset password via email')`
- Bad: `test('test reset password')`
## Page Object Model
### Basic Pattern
```typescript
// pages/login.page.ts
export class LoginPage {
constructor(private page: Page) {}
async goto() {
await this.page.goto("/login");
}
async login(email: string, password: string) {
await this.page.getByLabel("Email").fill(email);
await this.page.getByLabel("Password").fill(password);
await this.page.getByRole("button", { name: "Sign in" }).click();
}
}
// tests/login.spec.ts
test("successful login", async ({ page }) => {
const loginPage = new LoginPage(page);
await loginPage.goto();
await loginPage.login("user@example.com", "password");
await expect(page).toHaveURL("/dashboard");
});
```
## Locator Strategies
### Priority Order (Best to Worst)
1. **`getByRole`** - Accessible, resilient
2. **`getByLabel`** - Form inputs
3. **`getByPlaceholder`** - When no label
4. **`getByText`** - Visible text
5. **`getByTestId`** - When no better option
6. **CSS/XPath** - Last resort
### Examples
```typescript
// Preferred
await page.getByRole("button", { name: "Submit" }).click();
await page.getByLabel("Email address").fill("user@example.com");
// Acceptable
await page.getByTestId("submit-button").click();
// Avoid
await page.locator("#submit-btn").click();
await page.locator('//button[@type="submit"]').click();
```
## Authentication Handling
### Storage State (Recommended)
Save logged-in state and reuse across tests:
```typescript
// global-setup.ts
async function globalSetup() {
const browser = await chromium.launch();
const page = await browser.newPage();
await page.goto("/login");
await page.getByLabel("Email").fill(process.env.TEST_USER_EMAIL);
await page.getByLabel("Password").fill(process.env.TEST_USER_PASSWORD);
await page.getByRole("button", { name: "Sign in" }).click();
await page.waitForURL("/dashboard");
await page.context().storageState({ path: "auth.json" });
await browser.close();
}
// playwright.config.ts
export default defineConfig({
globalSetup: "./global-setup.ts",
use: {
storageState: "auth.json",
},
});
```
### Multi-User Scenarios
```typescript
// Create different auth states
const adminAuth = "admin-auth.json";
const userAuth = "user-auth.json";
test.describe("admin features", () => {
test.use({ storageState: adminAuth });
// Admin tests
});
test.describe("user features", () => {
test.use({ storageState: userAuth });
// User tests
});
```
## File Upload Handling
### Basic Upload
```typescript
// Single file
await page.getByLabel("Upload file").setInputFiles("path/to/file.pdf");
// Multiple files
await page
.getByLabel("Upload files")
.setInputFiles(["path/to/file1.pdf", "path/to/file2.pdf"]);
// Clear file input
await page.getByLabel("Upload file").setInputFiles([]);
```
### Drag and Drop Upload
```typescript
// Create file from buffer
const buffer = Buffer.from("file content");
await page.getByTestId("dropzone").dispatchEvent("drop", {
dataTransfer: {
files: [{ name: "test.txt", mimeType: "text/plain", buffer }],
},
});
```
### File Download
```typescript
const downloadPromise = page.waitForEvent("download");
await page.getByRole("button", { name: "Download" }).click();
const download = await downloadPromise;
await download.saveAs("downloads/" + download.suggestedFilename());
```
## Waiting Strategies
### Auto-Wait (Preferred)
Playwright auto-waits for elements. Use assertions:
```typescript
// Auto-waits for element to be visible and stable
await page.getByRole("button", { name: "Submit" }).click();
// Auto-waits for condition
await expect(page.getByText("Success")).toBeVisible();
```
### Explicit Waits (When Needed)
```typescript
// Wait for navigation
await page.waitForURL("**/dashboard");
// Wait for network idle
await page.waitForLoadState("networkidle");
// Wait for specific response
await page.waitForResponse((resp) => resp.url().includes("/api/data"));
```
## Network Mocking
### Mock API Responses
```typescript
await page.route("**/api/users", async (route) => {
await route.fulfill({
status: 200,
contentType: "application/json",
body: JSON.stringify([{ id: 1, name: "Test User" }]),
});
});
// Mock error response
await page.route("**/api/users", async (route) => {
await route.fulfill({ status: 500 });
});
```
### Intercept and Modify
```typescript
await page.route("**/api/data", async (route) => {
const response = await route.fetch();
const json = await response.json();
json.modified = true;
await route.fulfill({ response, json });
});
```
## CI/CD Integration
### GitHub Actions Example
```yaml
- name: Run Playwright tests
run: npx playwright test
env:
CI: true
- name: Upload test results
if: always()
  uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report/
```
### Parallel Execution
```typescript
// playwright.config.ts
export default defineConfig({
workers: process.env.CI ? 2 : undefined,
fullyParallel: true,
});
```
## Debugging Failed Tests
### Debug Tools
```bash
# Run with UI mode
npx playwright test --ui
# Run with inspector
npx playwright test --debug
# Show browser
npx playwright test --headed
```
### Trace Viewer
```typescript
// playwright.config.ts
use: {
trace: 'on-first-retry', // Capture trace on failure
}
```
## Flaky Test Fixes
### Common Causes and Solutions
**Race conditions:**
- Use proper assertions instead of hard waits
- Wait for network requests to complete
**Animation issues:**
- Disable animations in test config
- Wait for animation to complete
**Dynamic content:**
- Use flexible locators (text content, not position)
- Wait for loading states to resolve
**Test isolation:**
- Each test should set up its own state
- Don't depend on other tests' side effects
### Anti-Patterns to Avoid
```typescript
// Bad: Hard sleep
await page.waitForTimeout(5000);
// Good: Wait for condition
await expect(page.getByText("Loaded")).toBeVisible();
// Bad: Flaky selector
await page.locator(".btn:nth-child(3)").click();
// Good: Semantic selector
await page.getByRole("button", { name: "Submit" }).click();
```
## Responsive Design Testing
For comprehensive responsive testing across viewport breakpoints, use the **responsive-tester** agent. It automatically:
- Tests pages across 7 standard breakpoints (375px to 1536px)
- Detects horizontal overflow issues
- Verifies mobile-first design patterns
- Checks touch target sizes (44x44px minimum)
- Flags anti-patterns like fixed widths without mobile fallback
Trigger it by asking to "test responsiveness", "check breakpoints", or "test mobile/desktop layout".

View File

@@ -0,0 +1,11 @@
{
"name": "plugin-dev",
"version": "2.0.3",
"description": "Toolkit for developing Claude Code plugins. Includes 7 expert skills covering hooks, MCP integration, commands, agents, and best practices. AI-assisted plugin creation and validation.",
"author": {
"name": "Fatih Akyon"
},
"homepage": "https://github.com/fcakyon/claude-codex-settings#plugin-dev",
"repository": "https://github.com/fcakyon/claude-codex-settings",
"license": "Apache-2.0"
}

View File

@@ -0,0 +1,398 @@
# Plugin Development Toolkit
A comprehensive toolkit for developing Claude Code plugins with expert guidance on hooks, MCP integration, plugin structure, and marketplace publishing.
## Overview
The plugin-dev toolkit provides seven specialized skills to help you build high-quality Claude Code plugins:
1. **Hook Development** - Advanced hooks API and event-driven automation
2. **MCP Integration** - Model Context Protocol server integration
3. **Plugin Structure** - Plugin organization and manifest configuration
4. **Plugin Settings** - Configuration patterns using .claude/plugin-name.local.md files
5. **Command Development** - Creating slash commands with frontmatter and arguments
6. **Agent Development** - Creating autonomous agents with AI-assisted generation
7. **Skill Development** - Creating skills with progressive disclosure and strong triggers
Each skill follows best practices with progressive disclosure: lean core documentation, detailed references, working examples, and utility scripts.
## Guided Workflow Command
### /plugin-dev:create-plugin
A comprehensive, end-to-end workflow command for creating plugins from scratch, similar to the feature-dev workflow.
**8-Phase Process:**
1. **Discovery** - Understand plugin purpose and requirements
2. **Component Planning** - Determine needed skills, commands, agents, hooks, MCP
3. **Detailed Design** - Specify each component and resolve ambiguities
4. **Structure Creation** - Set up directories and manifest
5. **Component Implementation** - Create each component using AI-assisted agents
6. **Validation** - Run plugin-validator and component-specific checks
7. **Testing** - Verify plugin works in Claude Code
8. **Documentation** - Finalize README and prepare for distribution
**Features:**
- Asks clarifying questions at each phase
- Loads relevant skills automatically
- Uses agent-creator for AI-assisted agent generation
- Runs validation utilities (validate-agent.sh, validate-hook-schema.sh, etc.)
- Follows plugin-dev's own proven patterns
- Guides through testing and verification
**Usage:**
```bash
/plugin-dev:create-plugin [optional description]
# Examples:
/plugin-dev:create-plugin
/plugin-dev:create-plugin A plugin for managing database migrations
```
Use this workflow for structured, high-quality plugin development from concept to completion.
## Skills
### 1. Hook Development
**Trigger phrases:** "create a hook", "add a PreToolUse hook", "validate tool use", "implement prompt-based hooks", "${CLAUDE_PLUGIN_ROOT}", "block dangerous commands"
**What it covers:**
- Prompt-based hooks (recommended) with LLM decision-making
- Command hooks for deterministic validation
- All hook events: PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification
- Hook output formats and JSON schemas
- Security best practices and input validation
- ${CLAUDE_PLUGIN_ROOT} for portable paths
**Resources:**
- Core SKILL.md (1,619 words)
- 3 example hook scripts (validate-write, validate-bash, load-context)
- 3 reference docs: patterns, migration, advanced techniques
- 3 utility scripts: validate-hook-schema.sh, test-hook.sh, hook-linter.sh
**Use when:** Creating event-driven automation, validating operations, or enforcing policies in your plugin.
### 2. MCP Integration
**Trigger phrases:** "add MCP server", "integrate MCP", "configure .mcp.json", "Model Context Protocol", "stdio/SSE/HTTP server", "connect external service"
**What it covers:**
- MCP server configuration (.mcp.json vs plugin.json)
- All server types: stdio (local), SSE (hosted/OAuth), HTTP (REST), WebSocket (real-time)
- Environment variable expansion (${CLAUDE_PLUGIN_ROOT}, user vars)
- MCP tool naming and usage in commands/agents
- Authentication patterns: OAuth, tokens, env vars
- Integration patterns and performance optimization
**Resources:**
- Core SKILL.md (1,666 words)
- 3 example configurations (stdio, SSE, HTTP)
- 3 reference docs: server-types (~3,200w), authentication (~2,800w), tool-usage (~2,600w)
**Use when:** Integrating external services, APIs, databases, or tools into your plugin.
### 3. Plugin Structure
**Trigger phrases:** "plugin structure", "plugin.json manifest", "auto-discovery", "component organization", "plugin directory layout"
**What it covers:**
- Standard plugin directory structure and auto-discovery
- plugin.json manifest format and all fields
- Component organization (commands, agents, skills, hooks)
- ${CLAUDE_PLUGIN_ROOT} usage throughout
- File naming conventions and best practices
- Minimal, standard, and advanced plugin patterns
**Resources:**
- Core SKILL.md (1,619 words)
- 3 example structures (minimal, standard, advanced)
- 2 reference docs: component-patterns, manifest-reference
**Use when:** Starting a new plugin, organizing components, or configuring the plugin manifest.
### 4. Plugin Settings
**Trigger phrases:** "plugin settings", "store plugin configuration", ".local.md files", "plugin state files", "read YAML frontmatter", "per-project plugin settings"
**What it covers:**
- .claude/plugin-name.local.md pattern for configuration
- YAML frontmatter + markdown body structure
- Parsing techniques for bash scripts (sed, awk, grep patterns)
- Temporarily active hooks (flag files and quick-exit)
- Real-world examples from multi-agent-swarm and ralph-wiggum plugins
- Atomic file updates and validation
- Gitignore and lifecycle management
**Resources:**
- Core SKILL.md (1,623 words)
- 3 examples (read-settings hook, create-settings command, templates)
- 2 reference docs: parsing-techniques, real-world-examples
- 2 utility scripts: validate-settings.sh, parse-frontmatter.sh
**Use when:** Making plugins configurable, storing per-project state, or implementing user preferences.
### 5. Command Development
**Trigger phrases:** "create a slash command", "add a command", "command frontmatter", "define command arguments", "organize commands"
**What it covers:**
- Slash command structure and markdown format
- YAML frontmatter fields (description, argument-hint, allowed-tools)
- Dynamic arguments and file references
- Bash execution for context
- Command organization and namespacing
- Best practices for command development
**Resources:**
- Core SKILL.md (1,535 words)
- Examples and reference documentation
- Command organization patterns
**Use when:** Creating slash commands, defining command arguments, or organizing plugin commands.
### 6. Agent Development
**Trigger phrases:** "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "autonomous agent"
**What it covers:**
- Agent file structure (YAML frontmatter + system prompt)
- All frontmatter fields (name, description, model, color, tools)
- Description format with <example> blocks for reliable triggering
- System prompt design patterns (analysis, generation, validation, orchestration)
- AI-assisted agent generation using Claude Code's proven prompt
- Validation rules and best practices
- Complete production-ready agent examples
**Resources:**
- Core SKILL.md (1,438 words)
- 2 examples: agent-creation-prompt (AI-assisted workflow), complete-agent-examples (4 full agents)
- 3 reference docs: agent-creation-system-prompt (from Claude Code), system-prompt-design (~4,000w), triggering-examples (~2,500w)
- 1 utility script: validate-agent.sh
**Use when:** Creating autonomous agents, defining agent behavior, or implementing AI-assisted agent generation.
### 7. Skill Development
**Trigger phrases:** "create a skill", "add a skill to plugin", "write a new skill", "improve skill description", "organize skill content"
**What it covers:**
- Skill structure (SKILL.md with YAML frontmatter)
- Progressive disclosure principle (metadata → SKILL.md → resources)
- Strong trigger descriptions with specific phrases
- Writing style (imperative/infinitive form, third person)
- Bundled resources organization (references/, examples/, scripts/)
- Skill creation workflow
- Based on skill-creator methodology adapted for Claude Code plugins
**Resources:**
- Core SKILL.md (1,232 words)
- References: skill-creator methodology, plugin-dev patterns
- Examples: Study plugin-dev's own skills as templates
**Use when:** Creating new skills for plugins or improving existing skill quality.
## Installation
Install from claude-code-marketplace:
```bash
/plugin install plugin-dev@claude-code-marketplace
```
Or for development, use directly:
```bash
claude --plugin-dir /path/to/plugin-dev
```
## Quick Start
### Creating Your First Plugin
1. **Plan your plugin structure:**
- Ask: "What's the best directory structure for a plugin with commands and MCP integration?"
- The plugin-structure skill will guide you
2. **Add MCP integration (if needed):**
- Ask: "How do I add an MCP server for database access?"
- The mcp-integration skill provides examples and patterns
3. **Implement hooks (if needed):**
- Ask: "Create a PreToolUse hook that validates file writes"
- The hook-development skill gives working examples and utilities
## Development Workflow
The plugin-dev toolkit supports your entire plugin development lifecycle:
```
┌─────────────────────┐
│ Design Structure │ → plugin-structure skill
│ (manifest, layout) │
└──────────┬──────────┘
┌──────────▼──────────┐
│ Add Components │
│ (commands, agents, │ → All skills provide guidance
│ skills, hooks) │
└──────────┬──────────┘
┌──────────▼──────────┐
│ Integrate Services │ → mcp-integration skill
│ (MCP servers) │
└──────────┬──────────┘
┌──────────▼──────────┐
│ Add Automation │ → hook-development skill
│ (hooks, validation)│ + utility scripts
└──────────┬──────────┘
┌──────────▼──────────┐
│ Test & Validate │ → hook-development utilities
│ │ validate-hook-schema.sh
└──────────┬──────────┘ test-hook.sh
│ hook-linter.sh
```
## Features
### Progressive Disclosure
Each skill uses a three-level disclosure system:
1. **Metadata** (always loaded): Concise descriptions with strong triggers
2. **Core SKILL.md** (when triggered): Essential API reference (~1,500-2,000 words)
3. **References/Examples** (as needed): Detailed guides, patterns, and working code
This keeps Claude Code's context focused while providing deep knowledge when needed.
### Utility Scripts
The hook-development skill includes production-ready utilities:
```bash
# Validate hooks.json structure
./validate-hook-schema.sh hooks/hooks.json
# Test hooks before deployment
./test-hook.sh my-hook.sh test-input.json
# Lint hook scripts for best practices
./hook-linter.sh my-hook.sh
```
### Working Examples
Every skill provides working examples:
- **Hook Development**: 3 complete hook scripts (bash, write validation, context loading)
- **MCP Integration**: 3 server configurations (stdio, SSE, HTTP)
- **Plugin Structure**: 3 plugin layouts (minimal, standard, advanced)
- **Plugin Settings**: 3 examples (read-settings hook, create-settings command, templates)
- **Command Development**: 10 complete command examples (review, test, deploy, docs, etc.)
## Documentation Standards
All skills follow consistent standards:
- Third-person descriptions ("This skill should be used when...")
- Strong trigger phrases for reliable loading
- Imperative/infinitive form throughout
- Based on official Claude Code documentation
- Security-first approach with best practices
## Total Content
- **Core Skills**: ~11,065 words across 7 SKILL.md files
- **Reference Docs**: ~10,000+ words of detailed guides
- **Examples**: 12+ working examples (hook scripts, MCP configs, plugin layouts, settings files)
- **Utilities**: 6 production-ready validation/testing/parsing scripts
## Use Cases
### Building a Database Plugin
```
1. "What's the structure for a plugin with MCP integration?"
→ plugin-structure skill provides layout
2. "How do I configure an stdio MCP server for PostgreSQL?"
→ mcp-integration skill shows configuration
3. "Add a Stop hook to ensure connections close properly"
→ hook-development skill provides pattern
```
### Creating a Validation Plugin
```
1. "Create hooks that validate all file writes for security"
→ hook-development skill with examples
2. "Test my hooks before deploying"
→ Use validate-hook-schema.sh and test-hook.sh
3. "Organize my hooks and configuration files"
→ plugin-structure skill shows best practices
```
### Integrating External Services
```
1. "Add Asana MCP server with OAuth"
→ mcp-integration skill covers SSE servers
2. "Use Asana tools in my commands"
→ mcp-integration tool-usage reference
3. "Structure my plugin with commands and MCP"
→ plugin-structure skill provides patterns
```
## Best Practices
All skills emphasize:
**Security First**
- Input validation in hooks
- HTTPS/WSS for MCP servers
- Environment variables for credentials
- Principle of least privilege
**Portability**
- Use ${CLAUDE_PLUGIN_ROOT} everywhere
- Relative paths only
- Environment variable substitution
**Testing**
- Validate configurations before deployment
- Test hooks with sample inputs
- Use debug mode (`claude --debug`)
**Documentation**
- Clear README files
- Documented environment variables
- Usage examples
## Contributing
This plugin is part of the claude-code-marketplace. To contribute improvements:
1. Fork the marketplace repository
2. Make changes to plugin-dev/
3. Test locally with `claude --plugin-dir`
4. Create PR following marketplace-publishing guidelines
## Author
Edited by Fatih Akyon (linktr.ee/fcakyon). Originally: https://github.com/anthropics/claude-code. Main differences: Made it compatible with Claude Web and Claude Desktop.
## License
Apache-2.0 License - See repository for details
---
**Note:** This toolkit is designed to help you build high-quality plugins. The skills load automatically when you ask relevant questions, providing expert guidance exactly when you need it.

View File

@@ -0,0 +1,154 @@
---
name: agent-creator
description: |-
Use this agent when the user asks to "create an agent", "generate an agent", "build a new agent", "make me an agent that...", or describes agent functionality they need. Trigger when user wants to create autonomous agents for plugins. Examples:\n\n<example>\nContext: User wants to create a code review agent\nuser: "Create an agent that reviews code for quality issues"\nassistant: "I'll use the agent-creator agent to generate the agent configuration."\n<commentary>\nUser requesting new agent creation, trigger agent-creator to generate it.\n</commentary>\n</example>\n\n<example>\nContext: User describes needed functionality\nuser: "I need an agent that generates unit tests for my code"\nassistant: "I'll use the agent-creator agent to create a test generation agent."\n<commentary>\nUser describes agent need, trigger agent-creator to build it.\n</commentary>\n</example>\n\n<example>\nContext: User wants to add agent to plugin\nuser: "Add an agent to my plugin that validates configurations"\nassistant: "I'll use the agent-creator agent to generate a configuration validator agent."\n<commentary>\nPlugin development with agent addition, trigger agent-creator.\n</commentary>\n</example>
model: inherit
color: magenta
tools: ["Write", "Read"]
skills: agent-development, plugin-structure
---
You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability.
**Important Context**: You may have access to project-specific instructions from CLAUDE.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices.
When a user describes what they want an agent to do, you will:
1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from CLAUDE.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise.
2. **Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach.
3. **Architect Comprehensive Instructions**: Develop a system prompt that:
- Establishes clear behavioral boundaries and operational parameters
- Provides specific methodologies and best practices for task execution
- Anticipates edge cases and provides guidance for handling them
- Incorporates any specific requirements or preferences mentioned by the user
- Defines output format expectations when relevant
- Aligns with project-specific coding standards and patterns from CLAUDE.md
4. **Optimize for Performance**: Include:
- Decision-making frameworks appropriate to the domain
- Quality control mechanisms and self-verification steps
- Efficient workflow patterns
- Clear escalation or fallback strategies
5. **Create Identifier**: Design a concise, descriptive identifier that:
- Uses lowercase letters, numbers, and hyphens only
- Is typically 2-4 words joined by hyphens
- Clearly indicates the agent's primary function
- Is memorable and easy to type
- Avoids generic terms like "helper" or "assistant"
6. **Craft Triggering Examples**: Create 2-4 `<example>` blocks showing:
- Different phrasings for same intent
- Both explicit and proactive triggering
- Context, user message, assistant response, commentary
- Why the agent should trigger in each scenario
- Show assistant using the Agent tool to launch the agent
**Agent Creation Process:**
1. **Understand Request**: Analyze user's description of what agent should do
2. **Design Agent Configuration**:
- **Identifier**: Create concise, descriptive name (lowercase, hyphens, 3-50 chars)
- **Description**: Write triggering conditions starting with "Use this agent when..."
- **Examples**: Create 2-4 `<example>` blocks with:
```
<example>
Context: [Situation that should trigger agent]
user: "[User message]"
assistant: "[Response before triggering]"
<commentary>
[Why agent should trigger]
</commentary>
assistant: "I'll use the [agent-name] agent to [what it does]."
</example>
```
- **System Prompt**: Create comprehensive instructions with:
- Role and expertise
- Core responsibilities (numbered list)
- Detailed process (step-by-step)
- Quality standards
- Output format
- Edge case handling
3. **Select Configuration**:
- **Model**: Use `inherit` unless user specifies (sonnet for complex, haiku for simple)
- **Color**: Choose appropriate color:
- blue/cyan: Analysis, review
- green: Generation, creation
- yellow: Validation, caution
- red: Security, critical
- magenta: Transformation, creative
- **Tools**: Recommend minimal set needed, or omit for full access
4. **Generate Agent File**: Use Write tool to create `agents/[identifier].md`:
```markdown
---
name: [identifier]
description: [Use this agent when... Examples: <example>...</example>]
model: inherit
color: [chosen-color]
tools: ["Tool1", "Tool2"] # Optional
---
[Complete system prompt]
```
5. **Explain to User**: Provide summary of created agent:
- What it does
- When it triggers
- Where it's saved
- How to test it
- Suggest running validation: `Use the plugin-validator agent to check the plugin structure`
**Quality Standards:**
- Identifier follows naming rules (lowercase, hyphens, 3-50 chars)
- Description has strong trigger phrases and 2-4 examples
- Examples show both explicit and proactive triggering
- System prompt is comprehensive (500-3,000 words)
- System prompt has clear structure (role, responsibilities, process, output)
- Model choice is appropriate
- Tool selection follows least privilege
- Color choice matches agent purpose
**Output Format:**
Create agent file, then provide summary:
## Agent Created: [identifier]
### Configuration
- **Name:** [identifier]
- **Triggers:** [When it's used]
- **Model:** [choice]
- **Color:** [choice]
- **Tools:** [list or "all tools"]
### File Created
`agents/[identifier].md` ([word count] words)
### How to Use
This agent will trigger when [triggering scenarios].
Test it by: [suggest test scenario]
Validate with: `scripts/validate-agent.sh agents/[identifier].md`
### Next Steps
[Recommendations for testing, integration, or improvements]
**Edge Cases:**
- Vague user request: Ask clarifying questions before generating
- Conflicts with existing agents: Note conflict, suggest different scope/name
- Very complex requirements: Break into multiple specialized agents
- User wants specific tool access: Honor the request in agent configuration
- User specifies model: Use specified model instead of inherit
- First agent in plugin: Create agents/ directory first

View File

@@ -0,0 +1,165 @@
---
name: plugin-validator
description: |-
Use this agent when the user asks to "validate my plugin", "check plugin structure", "verify plugin is correct", "validate plugin.json", "check plugin files", or mentions plugin validation. Also trigger proactively after user creates or modifies plugin components. Examples:\n\n<example>\nContext: User finished creating a new plugin\nuser: "I've created my first plugin with commands and hooks"\nassistant: "Great! Let me validate the plugin structure."\n<commentary>\nPlugin created, proactively validate to catch issues early.\n</commentary>\nassistant: "I'll use the plugin-validator agent to check the plugin."\n</example>\n\n<example>\nContext: User explicitly requests validation\nuser: "Validate my plugin before I publish it"\nassistant: "I'll use the plugin-validator agent to perform comprehensive validation."\n<commentary>\nExplicit validation request triggers the agent.\n</commentary>\n</example>\n\n<example>\nContext: User modified plugin.json\nuser: "I've updated the plugin manifest"\nassistant: "Let me validate the changes."\n<commentary>\nManifest modified, validate to ensure correctness.\n</commentary>\nassistant: "I'll use the plugin-validator agent to check the manifest."\n</example>
model: inherit
color: yellow
tools: ["Read", "Grep", "Glob", "Bash"]
skills: plugin-structure, command-development, agent-development, skill-development, hook-development, mcp-integration
---
You are an expert plugin validator specializing in comprehensive validation of Claude Code plugin structure, configuration, and components.
**Your Core Responsibilities:**
1. Validate plugin structure and organization
2. Check plugin.json manifest for correctness
3. Validate all component files (commands, agents, skills, hooks)
4. Verify naming conventions and file organization
5. Check for common issues and anti-patterns
6. Provide specific, actionable recommendations
**Validation Process:**
1. **Locate Plugin Root**:
- Check for `.claude-plugin/plugin.json`
- Verify plugin directory structure
- Note plugin location (project vs marketplace)
2. **Validate Manifest** (`.claude-plugin/plugin.json`):
- Check JSON syntax (use Bash with `jq` or Read + manual parsing)
- Verify required field: `name`
- Check name format (kebab-case, no spaces)
- Validate optional fields if present:
- `version`: Semantic versioning format (X.Y.Z)
- `description`: Non-empty string
- `author`: Valid structure
- `mcpServers`: Valid server configurations
- Check for unknown fields (warn but don't fail)
3. **Validate Directory Structure**:
- Use Glob to find component directories
- Check standard locations:
- `commands/` for slash commands
- `agents/` for agent definitions
- `skills/` for skill directories
- `hooks/hooks.json` for hooks
- Verify auto-discovery works
4. **Validate Commands** (if `commands/` exists):
- Use Glob to find `commands/**/*.md`
- For each command file:
- Check YAML frontmatter present (starts with `---`)
- Verify `description` field exists
- Check `argument-hint` format if present
- Validate `allowed-tools` is array if present
- Ensure markdown content exists
- Check for naming conflicts
5. **Validate Agents** (if `agents/` exists):
- Use Glob to find `agents/**/*.md`
- For each agent file:
- Use the validate-agent.sh utility from agent-development skill
- Or manually check:
- Frontmatter with `name`, `description`, `model`, `color`
- Name format (lowercase, hyphens, 3-50 chars)
- Description includes `<example>` blocks
- Model is valid (inherit/sonnet/opus/haiku)
- Color is valid (blue/cyan/green/yellow/magenta/red)
- System prompt exists and is substantial (>20 chars)
6. **Validate Skills** (if `skills/` exists):
- Use Glob to find `skills/*/SKILL.md`
- For each skill directory:
- Verify `SKILL.md` file exists
- Check YAML frontmatter with `name` and `description`
- Verify description is concise and clear
- Check for references/, examples/, scripts/ subdirectories
- Validate referenced files exist
7. **Validate Hooks** (if `hooks/hooks.json` exists):
- Use the validate-hook-schema.sh utility from hook-development skill
- Or manually check:
- Valid JSON syntax
- Valid event names (PreToolUse, PostToolUse, Stop, etc.)
- Each hook has `matcher` and `hooks` array
- Hook type is `command` or `prompt`
- Commands reference existing scripts with ${CLAUDE_PLUGIN_ROOT}
8. **Validate MCP Configuration** (if `.mcp.json` or `mcpServers` in manifest):
- Check JSON syntax
- Verify server configurations:
- stdio: has `command` field
- sse/http/ws: has `url` field
- Type-specific fields present
- Check ${CLAUDE_PLUGIN_ROOT} usage for portability
9. **Check File Organization**:
- README.md exists and is comprehensive
- No unnecessary files (node_modules, .DS_Store, etc.)
- .gitignore present if needed
- LICENSE file present
10. **Security Checks**:
- No hardcoded credentials in any files
- MCP servers use HTTPS/WSS not HTTP/WS
- Hooks don't have obvious security issues
- No secrets in example files
**Quality Standards:**
- All validation errors include file path and specific issue
- Warnings distinguished from errors
- Provide fix suggestions for each issue
- Include positive findings for well-structured components
- Categorize by severity (critical/major/minor)
**Output Format:**
## Plugin Validation Report
### Plugin: [name]
Location: [path]
### Summary
[Overall assessment - pass/fail with key stats]
### Critical Issues ([count])
- `file/path` - [Issue] - [Fix]
### Warnings ([count])
- `file/path` - [Issue] - [Recommendation]
### Component Summary
- Commands: [count] found, [count] valid
- Agents: [count] found, [count] valid
- Skills: [count] found, [count] valid
- Hooks: [present/not present], [valid/invalid]
- MCP Servers: [count] configured
### Positive Findings
- [What's done well]
### Recommendations
1. [Priority recommendation]
2. [Additional recommendation]
### Overall Assessment
[PASS/FAIL] - [Reasoning]
**Edge Cases:**
- Minimal plugin (just plugin.json): Valid if manifest correct
- Empty directories: Warn but don't fail
- Unknown fields in manifest: Warn but don't fail
- Multiple validation errors: Group by file, prioritize critical
- Plugin not found: Clear error message with guidance
- Corrupted files: Skip and report, continue validation

View File

@@ -0,0 +1,172 @@
---
name: skill-reviewer
description: |-
Use this agent when the user has created or modified a skill and needs quality review, asks to "review my skill", "check skill quality", "improve skill description", or wants to ensure skill follows best practices. Trigger proactively after skill creation. Examples:\n\n<example>\nContext: User just created a new skill\nuser: "I've created a PDF processing skill"\nassistant: "Great! Let me review the skill quality."\n<commentary>\nSkill created, proactively trigger skill-reviewer to ensure it follows best practices.\n</commentary>\nassistant: "I'll use the skill-reviewer agent to review the skill."\n</example>\n\n<example>\nContext: User requests skill review\nuser: "Review my skill and tell me how to improve it"\nassistant: "I'll use the skill-reviewer agent to analyze the skill quality."\n<commentary>\nExplicit skill review request triggers the agent.\n</commentary>\n</example>\n\n<example>\nContext: User modified skill description\nuser: "I updated the skill description, does it look good?"\nassistant: "I'll use the skill-reviewer agent to review the changes."\n<commentary>\nSkill description modified, review for triggering effectiveness.\n</commentary>\n</example>
model: inherit
color: cyan
tools: ["Read", "Grep", "Glob"]
skills: skill-development, plugin-structure
---
You are an expert skill architect specializing in reviewing and improving Claude Code skills for maximum effectiveness and reliability.
**Your Core Responsibilities:**
1. Review skill structure and organization
2. Evaluate description quality and triggering effectiveness
3. Assess progressive disclosure implementation
4. Check adherence to skill-creator best practices
5. Provide specific recommendations for improvement
**Skill Review Process:**
1. **Locate and Read Skill**:
- Find SKILL.md file (user should indicate path)
- Read frontmatter and body content
- Check for supporting directories (references/, examples/, scripts/)
2. **Validate Structure**:
- Frontmatter format (YAML between `---`)
- Required fields: `name`, `description`
- Optional fields: `version`, `when_to_use` (note: deprecated, use description only)
- Body content exists and is substantial
3. **Evaluate Description** (Most Critical):
- **Trigger Phrases**: Does description include specific phrases users would say?
- **Third Person**: Uses "This skill should be used when..." not "Load this skill when..."
- **Specificity**: Concrete scenarios, not vague
   - **Length**: Appropriate (at least ~50 characters so triggers are clear, at most ~500 characters so it stays scannable)
- **Example Triggers**: Lists specific user queries that should trigger skill
4. **Assess Content Quality**:
- **Word Count**: SKILL.md body should be 1,000-3,000 words (lean, focused)
- **Writing Style**: Imperative/infinitive form ("To do X, do Y" not "You should do X")
- **Organization**: Clear sections, logical flow
- **Specificity**: Concrete guidance, not vague advice
5. **Check Progressive Disclosure**:
- **Core SKILL.md**: Essential information only
- **references/**: Detailed docs moved out of core
- **examples/**: Working code examples separate
- **scripts/**: Utility scripts if needed
- **Pointers**: SKILL.md references these resources clearly
6. **Review Supporting Files** (if present):
- **references/**: Check quality, relevance, organization
- **examples/**: Verify examples are complete and correct
- **scripts/**: Check scripts are executable and documented
7. **Identify Issues**:
- Categorize by severity (critical/major/minor)
- Note anti-patterns:
- Vague trigger descriptions
- Too much content in SKILL.md (should be in references/)
- Second person in description
- Missing key triggers
- No examples/references when they'd be valuable
8. **Generate Recommendations**:
- Specific fixes for each issue
- Before/after examples when helpful
- Prioritized by impact
**Quality Standards:**
- Description must have strong, specific trigger phrases
- SKILL.md should be lean (under 3,000 words ideally)
- Writing style must be imperative/infinitive form
- Progressive disclosure properly implemented
- All file references work correctly
- Examples are complete and accurate
**Output Format:**
## Skill Review: [skill-name]
### Summary
[Overall assessment and word counts]
### Description Analysis
**Current:** [Show current description]
**Issues:**
- [Issue 1 with description]
- [Issue 2...]
**Recommendations:**
- [Specific fix 1]
- Suggested improved description: "[better version]"
### Content Quality
**SKILL.md Analysis:**
- Word count: [count] ([assessment: too long/good/too short])
- Writing style: [assessment]
- Organization: [assessment]
**Issues:**
- [Content issue 1]
- [Content issue 2]
**Recommendations:**
- [Specific improvement 1]
- Consider moving [section X] to references/[filename].md
### Progressive Disclosure
**Current Structure:**
- SKILL.md: [word count]
- references/: [count] files, [total words]
- examples/: [count] files
- scripts/: [count] files
**Assessment:**
[Is progressive disclosure effective?]
**Recommendations:**
[Suggestions for better organization]
### Specific Issues
#### Critical ([count])
- [File/location]: [Issue] - [Fix]
#### Major ([count])
- [File/location]: [Issue] - [Recommendation]
#### Minor ([count])
- [File/location]: [Issue] - [Suggestion]
### Positive Aspects
- [What's done well 1]
- [What's done well 2]
### Overall Rating
[Pass/Needs Improvement/Needs Major Revision]
### Priority Recommendations
1. [Highest priority fix]
2. [Second priority]
3. [Third priority]
**Edge Cases:**
- Skill with no description issues: Focus on content and organization
- Very long skill (>5,000 words): Strongly recommend splitting into references
- New skill (minimal content): Provide constructive building guidance
- Perfect skill: Acknowledge quality and suggest minor enhancements only
- Missing referenced files: Report errors clearly with paths

View File

@@ -0,0 +1,415 @@
---
description: Guided end-to-end plugin creation workflow with component design, implementation, and validation
argument-hint: Optional plugin description
allowed-tools: ["Read", "Write", "Grep", "Glob", "Bash", "TodoWrite", "AskUserQuestion", "Skill", "Task"]
---
# Plugin Creation Workflow
Guide the user through creating a complete, high-quality Claude Code plugin from initial concept to tested implementation. Follow a systematic approach: understand requirements, design components, clarify details, implement following best practices, validate, and test.
## Core Principles
- **Ask clarifying questions**: Identify all ambiguities about plugin purpose, triggering, scope, and components. Ask specific, concrete questions rather than making assumptions. Wait for user answers before proceeding with implementation.
- **Load relevant skills**: Use the Skill tool to load plugin-dev skills when needed (plugin-structure, hook-development, agent-development, etc.)
- **Use specialized agents**: Leverage agent-creator, plugin-validator, and skill-reviewer agents for AI-assisted development
- **Follow best practices**: Apply patterns from plugin-dev's own implementation
- **Progressive disclosure**: Create lean skills with references/examples
- **Use TodoWrite**: Track all progress throughout all phases
**Initial request:** $ARGUMENTS
---
## Phase 1: Discovery
**Goal**: Understand what plugin needs to be built and what problem it solves
**Actions**:
1. Create todo list with all 8 phases
2. If plugin purpose is clear from arguments:
- Summarize understanding
- Identify plugin type (integration, workflow, analysis, toolkit, etc.)
3. If plugin purpose is unclear, ask user:
- What problem does this plugin solve?
- Who will use it and when?
- What should it do?
- Any similar plugins to reference?
4. Summarize understanding and confirm with user before proceeding
**Output**: Clear statement of plugin purpose and target users
---
## Phase 2: Component Planning
**Goal**: Determine what plugin components are needed
**MUST load plugin-structure skill** using Skill tool before this phase.
**Actions**:
1. Load plugin-structure skill to understand component types
2. Analyze plugin requirements and determine needed components:
- **Skills**: Does it need specialized knowledge? (hooks API, MCP patterns, etc.)
- **Commands**: User-initiated actions? (deploy, configure, analyze)
- **Agents**: Autonomous tasks? (validation, generation, analysis)
- **Hooks**: Event-driven automation? (validation, notifications)
- **MCP**: External service integration? (databases, APIs)
- **Settings**: User configuration? (.local.md files)
3. For each component type needed, identify:
- How many of each type
- What each one does
- Rough triggering/usage patterns
4. Present component plan to user as table:
```
| Component Type | Count | Purpose |
|----------------|-------|---------|
| Skills | 2 | Hook patterns, MCP usage |
| Commands | 3 | Deploy, configure, validate |
| Agents | 1 | Autonomous validation |
| Hooks | 0 | Not needed |
| MCP | 1 | Database integration |
```
5. Get user confirmation or adjustments
**Output**: Confirmed list of components to create
---
## Phase 3: Detailed Design & Clarifying Questions
**Goal**: Specify each component in detail and resolve all ambiguities
**CRITICAL**: This is one of the most important phases. DO NOT SKIP.
**Actions**:
1. For each component in the plan, identify underspecified aspects:
- **Skills**: What triggers them? What knowledge do they provide? How detailed?
- **Commands**: What arguments? What tools? Interactive or automated?
- **Agents**: When to trigger (proactive/reactive)? What tools? Output format?
- **Hooks**: Which events? Prompt or command based? Validation criteria?
- **MCP**: What server type? Authentication? Which tools?
- **Settings**: What fields? Required vs optional? Defaults?
2. **Present all questions to user in organized sections** (one section per component type)
3. **Wait for answers before proceeding to implementation**
4. If user says "whatever you think is best", provide specific recommendations and get explicit confirmation
**Example questions for a skill**:
- What specific user queries should trigger this skill?
- Should it include utility scripts? What functionality?
- How detailed should the core SKILL.md be vs references/?
- Any real-world examples to include?
**Example questions for an agent**:
- Should this agent trigger proactively after certain actions, or only when explicitly requested?
- What tools does it need (Read, Write, Bash, etc.)?
- What should the output format be?
- Any specific quality standards to enforce?
**Output**: Detailed specification for each component
---
## Phase 4: Plugin Structure Creation
**Goal**: Create plugin directory structure and manifest
**Actions**:
1. Determine plugin name (kebab-case, descriptive)
2. Choose plugin location:
- Ask user: "Where should I create the plugin?"
- Offer options: current directory, ../new-plugin-name, custom path
3. Create directory structure using bash:
```bash
mkdir -p plugin-name/.claude-plugin
mkdir -p plugin-name/skills # if needed
mkdir -p plugin-name/commands # if needed
mkdir -p plugin-name/agents # if needed
mkdir -p plugin-name/hooks # if needed
```
4. Create plugin.json manifest using Write tool:
```json
{
"name": "plugin-name",
"version": "0.1.0",
"description": "[brief description]",
"author": {
"name": "[author from user or default]",
"email": "[email or default]"
}
}
```
5. Create README.md template
6. Create .gitignore if needed (for .claude/*.local.md, etc.)
7. Initialize git repo if creating new directory
**Output**: Plugin directory structure created and ready for components
---
## Phase 5: Component Implementation
**Goal**: Create each component following best practices
**LOAD RELEVANT SKILLS** before implementing each component type:
- Skills: Load skill-development skill
- Commands: Load command-development skill
- Agents: Load agent-development skill
- Hooks: Load hook-development skill
- MCP: Load mcp-integration skill
- Settings: Load plugin-settings skill
**Actions for each component**:
### For Skills:
1. Load skill-development skill using Skill tool
2. For each skill:
- Ask user for concrete usage examples (or use from Phase 3)
- Plan resources (scripts/, references/, examples/)
- Create skill directory structure
- Write SKILL.md with:
- Third-person description with specific trigger phrases
- Lean body (1,500-2,000 words) in imperative form
- References to supporting files
- Create reference files for detailed content
- Create example files for working code
- Create utility scripts if needed
3. Use skill-reviewer agent to validate each skill
### For Commands:
1. Load command-development skill using Skill tool
2. For each command:
- Write command markdown with frontmatter
- Include clear description and argument-hint
- Specify allowed-tools (minimal necessary)
- Write instructions FOR Claude (not TO user)
- Provide usage examples and tips
- Reference relevant skills if applicable
### For Agents:
1. Load agent-development skill using Skill tool
2. For each agent, use agent-creator agent:
- Provide description of what agent should do
- Agent-creator generates: identifier, whenToUse with examples, systemPrompt
- Create agent markdown file with frontmatter and system prompt
- Add appropriate model, color, and tools
- Validate with validate-agent.sh script
### For Hooks:
1. Load hook-development skill using Skill tool
2. For each hook:
- Create hooks/hooks.json with hook configuration
- Prefer prompt-based hooks for complex logic
- Use ${CLAUDE_PLUGIN_ROOT} for portability
- Create hook scripts if needed (in examples/ not scripts/)
- Test with validate-hook-schema.sh and test-hook.sh utilities
### For MCP:
1. Load mcp-integration skill using Skill tool
2. Create .mcp.json configuration with:
- Server type (stdio for local, SSE for hosted)
- Command and args (with ${CLAUDE_PLUGIN_ROOT})
- extensionToLanguage mapping if LSP
- Environment variables as needed
3. Document required env vars in README
4. Provide setup instructions
### For Settings:
1. Load plugin-settings skill using Skill tool
2. Create settings template in README
3. Create example .claude/plugin-name.local.md file (as documentation)
4. Implement settings reading in hooks/commands as needed
5. Add to .gitignore: `.claude/*.local.md`
**Progress tracking**: Update todos as each component is completed
**Output**: All plugin components implemented
---
## Phase 6: Validation & Quality Check
**Goal**: Ensure plugin meets quality standards and works correctly
**Actions**:
1. **Run plugin-validator agent**:
- Use plugin-validator agent to comprehensively validate plugin
- Check: manifest, structure, naming, components, security
- Review validation report
2. **Fix critical issues**:
- Address any critical errors from validation
- Fix any warnings that indicate real problems
3. **Review with skill-reviewer** (if plugin has skills):
- For each skill, use skill-reviewer agent
- Check description quality, progressive disclosure, writing style
- Apply recommendations
4. **Test agent triggering** (if plugin has agents):
- For each agent, verify <example> blocks are clear
- Check triggering conditions are specific
- Run validate-agent.sh on agent files
5. **Test hook configuration** (if plugin has hooks):
- Run validate-hook-schema.sh on hooks/hooks.json
- Test hook scripts with test-hook.sh
- Verify ${CLAUDE_PLUGIN_ROOT} usage
6. **Present findings**:
- Summary of validation results
- Any remaining issues
- Overall quality assessment
7. **Ask user**: "Validation complete. Issues found: [count critical], [count warnings]. Would you like me to fix them now, or proceed to testing?"
**Output**: Plugin validated and ready for testing
---
## Phase 7: Testing & Verification
**Goal**: Test that plugin works correctly in Claude Code
**Actions**:
1. **Installation instructions**:
- Show user how to test locally:
```bash
   claude --plugin-dir /path/to/plugin-name
```
- Or copy to `.claude-plugin/` for project testing
2. **Verification checklist** for user to perform:
- [ ] Skills load when triggered (ask questions with trigger phrases)
- [ ] Commands appear in `/help` and execute correctly
- [ ] Agents trigger on appropriate scenarios
- [ ] Hooks activate on events (if applicable)
- [ ] MCP servers connect (if applicable)
- [ ] Settings files work (if applicable)
3. **Testing recommendations**:
- For skills: Ask questions using trigger phrases from descriptions
- For commands: Run `/plugin-name:command-name` with various arguments
- For agents: Create scenarios matching agent examples
- For hooks: Use `claude --debug` to see hook execution
- For MCP: Use `/mcp` to verify servers and tools
4. **Ask user**: "I've prepared the plugin for testing. Would you like me to guide you through testing each component, or do you want to test it yourself?"
5. **If user wants guidance**, walk through testing each component with specific test cases
**Output**: Plugin tested and verified working
---
## Phase 8: Documentation & Next Steps
**Goal**: Ensure plugin is well-documented and ready for distribution
**Actions**:
1. **Verify README completeness**:
- Check README has: overview, features, installation, prerequisites, usage
- For MCP plugins: Document required environment variables
- For hook plugins: Explain hook activation
- For settings: Provide configuration templates
2. **Add marketplace entry** (if publishing):
- Show user how to add to marketplace.json
- Help draft marketplace description
- Suggest category and tags
3. **Create summary**:
- Mark all todos complete
- List what was created:
- Plugin name and purpose
- Components created (X skills, Y commands, Z agents, etc.)
- Key files and their purposes
- Total file count and structure
- Next steps:
- Testing recommendations
- Publishing to marketplace (if desired)
- Iteration based on usage
4. **Suggest improvements** (optional):
- Additional components that could enhance plugin
- Integration opportunities
- Testing strategies
**Output**: Complete, documented plugin ready for use or publication
---
## Important Notes
### Throughout All Phases
- **Use TodoWrite** to track progress at every phase
- **Load skills with Skill tool** when working on specific component types
- **Use specialized agents** (agent-creator, plugin-validator, skill-reviewer)
- **Ask for user confirmation** at key decision points
- **Follow plugin-dev's own patterns** as reference examples
- **Apply best practices**:
- Third-person descriptions for skills
- Imperative form in skill bodies
- Commands written FOR Claude
- Strong trigger phrases
- ${CLAUDE_PLUGIN_ROOT} for portability
- Progressive disclosure
- Security-first (HTTPS, no hardcoded credentials)
### Key Decision Points (Wait for User)
1. After Phase 1: Confirm plugin purpose
2. After Phase 2: Approve component plan
3. After Phase 3: Proceed to implementation
4. After Phase 6: Fix issues or proceed
5. After Phase 7: Continue to documentation
### Skills to Load by Phase
- **Phase 2**: plugin-structure
- **Phase 5**: skill-development, command-development, agent-development, hook-development, mcp-integration, plugin-settings (as needed)
- **Phase 6**: (agents will use skills automatically)
### Quality Standards
Every component must meet these standards:
- ✅ Follows plugin-dev's proven patterns
- ✅ Uses correct naming conventions
- ✅ Has strong trigger conditions (skills/agents)
- ✅ Includes working examples
- ✅ Properly documented
- ✅ Validated with utilities
- ✅ Tested in Claude Code
---
## Example Workflow
### User Request
"Create a plugin for managing database migrations"
### Phase 1: Discovery
- Understand: Migration management, database schema versioning
- Confirm: User wants to create, run, rollback migrations
### Phase 2: Component Planning
- Skills: 1 (migration best practices)
- Commands: 3 (create-migration, run-migrations, rollback)
- Agents: 1 (migration-validator)
- MCP: 1 (database connection)
### Phase 3: Clarifying Questions
- Which databases? (PostgreSQL, MySQL, etc.)
- Migration file format? (SQL, code-based?)
- Should agent validate before applying?
- What MCP tools needed? (query, execute, schema)
### Phase 4-8: Implementation, Validation, Testing, Documentation
---
**Begin with Phase 1: Discovery**

View File

@@ -0,0 +1,18 @@
---
description: Load all plugin development skills
allowed-tools: Read
---
# Load Plugin Development Skills
Read all plugin development SKILL.md files to provide guidance. The files are located at:
- @${CLAUDE_PLUGIN_ROOT}/skills/plugin-structure/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/agent-development/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/command-development/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/skill-development/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/hook-development/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/mcp-integration/SKILL.md
- @${CLAUDE_PLUGIN_ROOT}/skills/plugin-settings/SKILL.md
Use this guidance to help with plugin development tasks.

View File

@@ -0,0 +1,16 @@
{
"description": "Plugin development marketplace sync hooks",
"hooks": {
"PostToolUse": [
{
"matcher": "Write|Edit|MultiEdit",
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/sync_marketplace_to_plugins.py"
}
]
}
]
}
}

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""Sync marketplace.json plugin entries to individual plugin.json files."""
import json
import sys
from pathlib import Path
def get_edited_file_path():
    """Return the file path edited by the tool, read from the hook's stdin JSON.

    The PostToolUse hook payload is a JSON object with the edited path at
    ``tool_input.file_path``. Returns "" when stdin is not valid JSON or the
    payload does not have the expected shape (e.g. the top-level value or
    ``tool_input`` is not an object), so callers can treat any failure as
    "no relevant edit".
    """
    try:
        input_data = json.load(sys.stdin)
        return input_data.get("tool_input", {}).get("file_path", "")
    except (json.JSONDecodeError, AttributeError):
        # AttributeError covers a non-dict payload or non-dict "tool_input"
        # (the previously-caught KeyError can never be raised by .get()).
        return ""
def sync_marketplace_to_plugins():
    """Sync marketplace.json plugin entries to individual plugin.json files.

    Intended as a Claude Code PostToolUse hook: reads the hook payload from
    stdin and only acts when the edited file is a marketplace.json. For each
    plugin entry with a "source" path, mirrors the entry's fields into that
    plugin's .claude-plugin/plugin.json, writing only when content differs.

    Returns:
        int: 0 on success or when there is nothing to do; 2 when the
        marketplace file cannot be read or any plugin manifest cannot be
        written (matching the hook's existing error convention).
    """
    edited_path = get_edited_file_path()

    # Only trigger for marketplace.json edits; any other file is a no-op.
    if not edited_path.endswith("marketplace.json"):
        return 0

    marketplace_path = Path(edited_path)
    if not marketplace_path.exists():
        return 0

    try:
        marketplace = json.loads(marketplace_path.read_text())
    except (json.JSONDecodeError, OSError) as e:
        print(f"❌ Failed to read marketplace.json: {e}", file=sys.stderr)
        return 2

    plugins = marketplace.get("plugins", [])
    if not plugins:
        return 0

    # Go up from .claude-plugin/ to the marketplace root; assumes the
    # manifest lives at <root>/.claude-plugin/marketplace.json -- TODO confirm.
    marketplace_dir = marketplace_path.parent.parent
    synced = []
    failed = []
    for plugin in plugins:
        source = plugin.get("source")
        if not source:
            continue

        # Resolve plugin directory relative to marketplace root.
        plugin_dir = (marketplace_dir / source).resolve()
        plugin_json_dir = plugin_dir / ".claude-plugin"
        plugin_json_path = plugin_json_dir / "plugin.json"

        # Build plugin.json content from the marketplace entry: "name" is
        # always emitted, the optional fields only when present upstream.
        plugin_data = {"name": plugin.get("name", "")}
        for field in ["version", "description", "author", "homepage", "repository", "license"]:
            if field in plugin:
                plugin_data[field] = plugin[field]

        try:
            # Create directory if needed.
            plugin_json_dir.mkdir(parents=True, exist_ok=True)

            # Skip the write when the manifest already matches, so the hook
            # does not dirty files (or retrigger tooling) needlessly.
            current_data = {}
            if plugin_json_path.exists():
                try:
                    current_data = json.loads(plugin_json_path.read_text())
                except json.JSONDecodeError:
                    pass  # unreadable manifest: fall through and overwrite it
            if current_data != plugin_data:
                plugin_json_path.write_text(json.dumps(plugin_data, indent=2) + "\n")
                synced.append(plugin.get("name", source))
        except OSError as e:
            # Previously an unhandled OSError here crashed the hook with a
            # raw traceback; report it via the script's own error convention
            # and keep syncing the remaining plugins (best-effort).
            print(f"❌ Failed to write {plugin_json_path}: {e}", file=sys.stderr)
            failed.append(plugin.get("name", source))

    if synced:
        print(f"✓ Synced {len(synced)} plugin manifest(s): {', '.join(synced)}")
    return 2 if failed else 0


if __name__ == "__main__":
    sys.exit(sync_marketplace_to_plugins())

View File

@@ -0,0 +1,414 @@
---
name: agent-development
description: This skill should be used when the user asks to "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "agent tools", "agent colors", "autonomous agent", or needs guidance on agent structure, system prompts, triggering conditions, or agent development best practices for Claude Code plugins.
---
# Agent Development for Claude Code Plugins
## Overview
Agents are autonomous subprocesses that handle complex, multi-step tasks independently. Understanding agent structure, triggering conditions, and system prompt design enables creating powerful autonomous capabilities.
**Key concepts:**
- Agents are FOR autonomous work, commands are FOR user-initiated actions
- Markdown file format with YAML frontmatter
- Triggering via description field with examples
- System prompt defines agent behavior
- Model and color customization
## Agent File Structure
### Complete Format
```markdown
---
name: agent-identifier
description: Use this agent when [triggering conditions]. Examples:
<example>
Context: [Situation description]
user: "[User request]"
assistant: "[How assistant should respond and use this agent]"
<commentary>
[Why this agent should be triggered]
</commentary>
</example>
<example>
[Additional example...]
</example>
model: inherit
color: blue
tools: ["Read", "Write", "Grep"]
---
You are [agent role description]...
**Your Core Responsibilities:**
1. [Responsibility 1]
2. [Responsibility 2]
**Analysis Process:**
[Step-by-step workflow]
**Output Format:**
[What to return]
```
## Frontmatter Fields
### name (required)
Agent identifier used for namespacing and invocation.
**Format:** lowercase, numbers, hyphens only
**Length:** 3-50 characters
**Pattern:** Must start and end with alphanumeric
**Good examples:**
- `code-reviewer`
- `test-generator`
- `api-docs-writer`
- `security-analyzer`
**Bad examples:**
- `helper` (too generic)
- `-agent-` (starts/ends with hyphen)
- `my_agent` (underscores not allowed)
- `ag` (too short, < 3 chars)
### description (required)
Defines when Claude should trigger this agent. **This is the most critical field.**
**Must include:**
1. Triggering conditions ("Use this agent when...")
2. Multiple `<example>` blocks showing usage
3. Context, user request, and assistant response in each example
4. `<commentary>` explaining why agent triggers
**Format:**
```
Use this agent when [conditions]. Examples:
<example>
Context: [Scenario description]
user: "[What user says]"
assistant: "[How Claude should respond]"
<commentary>
[Why this agent is appropriate]
</commentary>
</example>
[More examples...]
```
**Best practices:**
- Include 2-4 concrete examples
- Show proactive and reactive triggering
- Cover different phrasings of same intent
- Explain reasoning in commentary
- Be specific about when NOT to use the agent
### model (required)
Which model the agent should use.
**Options:**
- `inherit` - Use same model as parent (recommended)
- `sonnet` - Claude Sonnet (balanced)
- `opus` - Claude Opus (most capable, expensive)
- `haiku` - Claude Haiku (fast, cheap)
**Recommendation:** Use `inherit` unless agent needs specific model capabilities.
### color (required)
Visual identifier for agent in UI.
**Options:** `blue`, `cyan`, `green`, `yellow`, `magenta`, `red`
**Guidelines:**
- Choose distinct colors for different agents in same plugin
- Use consistent colors for similar agent types
- Blue/cyan: Analysis, review
- Green: Success-oriented tasks
- Yellow: Caution, validation
- Red: Critical, security
- Magenta: Creative, generation
### tools (optional)
Restrict agent to specific tools.
**Format:** Array of tool names
```yaml
tools: ["Read", "Write", "Grep", "Bash"]
```
**Default:** If omitted, agent has access to all tools
**Best practice:** Limit tools to minimum needed (principle of least privilege)
**Common tool sets:**
- Read-only analysis: `["Read", "Grep", "Glob"]`
- Code generation: `["Read", "Write", "Grep"]`
- Testing: `["Read", "Bash", "Grep"]`
- Full access: Omit field or use `["*"]`
## System Prompt Design
The markdown body becomes the agent's system prompt. Write in second person, addressing the agent directly.
### Structure
**Standard template:**
```markdown
You are [role] specializing in [domain].
**Your Core Responsibilities:**
1. [Primary responsibility]
2. [Secondary responsibility]
3. [Additional responsibilities...]
**Analysis Process:**
1. [Step one]
2. [Step two]
3. [Step three]
[...]
**Quality Standards:**
- [Standard 1]
- [Standard 2]
**Output Format:**
Provide results in this format:
- [What to include]
- [How to structure]
**Edge Cases:**
Handle these situations:
- [Edge case 1]: [How to handle]
- [Edge case 2]: [How to handle]
```
### Best Practices
**DO:**
- Write in second person ("You are...", "You will...")
- Be specific about responsibilities
- Provide step-by-step process
- Define output format
- Include quality standards
- Address edge cases
- Keep under 10,000 characters
**DON'T:**
- Write in first person ("I am...", "I will...")
- Be vague or generic
- Omit process steps
- Leave output format undefined
- Skip quality guidance
- Ignore error cases
## Creating Agents
### Method 1: AI-Assisted Generation
Use this prompt pattern (extracted from Claude Code):
```
Create an agent configuration based on this request: "[YOUR DESCRIPTION]"
Requirements:
1. Extract core intent and responsibilities
2. Design expert persona for the domain
3. Create comprehensive system prompt with:
- Clear behavioral boundaries
- Specific methodologies
- Edge case handling
- Output format
4. Create identifier (lowercase, hyphens, 3-50 chars)
5. Write description with triggering conditions
6. Include 2-3 <example> blocks showing when to use
Return JSON with:
{
"identifier": "agent-name",
"whenToUse": "Use this agent when... Examples: <example>...</example>",
"systemPrompt": "You are..."
}
```
Then convert to agent file format with frontmatter.
See `examples/agent-creation-prompt.md` for complete template.
### Method 2: Manual Creation
1. Choose agent identifier (3-50 chars, lowercase, hyphens)
2. Write description with examples
3. Select model (usually `inherit`)
4. Choose color for visual identification
5. Define tools (if restricting access)
6. Write system prompt with structure above
7. Save as `agents/agent-name.md`
## Validation Rules
### Identifier Validation
```
✅ Valid: code-reviewer, test-gen, api-analyzer-v2
❌ Invalid: ag (too short), -start (starts with hyphen), my_agent (underscore)
```
**Rules:**
- 3-50 characters
- Lowercase letters, numbers, hyphens only
- Must start and end with alphanumeric
- No underscores, spaces, or special characters
### Description Validation
**Length:** 10-5,000 characters
**Must include:** Triggering conditions and examples
**Best:** 200-1,000 characters with 2-4 examples
### System Prompt Validation
**Length:** 20-10,000 characters
**Best:** 500-3,000 characters
**Structure:** Clear responsibilities, process, output format
## Agent Organization
### Plugin Agents Directory
```
plugin-name/
└── agents/
├── analyzer.md
├── reviewer.md
└── generator.md
```
All `.md` files in `agents/` are auto-discovered.
### Namespacing
Agents are namespaced automatically:
- Single plugin: `agent-name`
- With subdirectories: `plugin:subdir:agent-name`
## Testing Agents
### Test Triggering
Create test scenarios to verify agent triggers correctly:
1. Write agent with specific triggering examples
2. Use similar phrasing to examples in test
3. Check Claude loads the agent
4. Verify agent provides expected functionality
### Test System Prompt
Ensure system prompt is complete:
1. Give agent typical task
2. Check it follows process steps
3. Verify output format is correct
4. Test edge cases mentioned in prompt
5. Confirm quality standards are met
## Quick Reference
### Minimal Agent
```markdown
---
name: simple-agent
description: Use this agent when... Examples: <example>...</example>
model: inherit
color: blue
---
You are an agent that [does X].
Process:
1. [Step 1]
2. [Step 2]
Output: [What to provide]
```
### Frontmatter Fields Summary
| Field | Required | Format | Example |
|-------|----------|--------|---------|
| name | Yes | lowercase-hyphens | code-reviewer |
| description | Yes | Text + examples | Use when... <example>... |
| model | Yes | inherit/sonnet/opus/haiku | inherit |
| color | Yes | Color name | blue |
| tools | No | Array of tool names | ["Read", "Grep"] |
### Best Practices
**DO:**
- ✅ Include 2-4 concrete examples in description
- ✅ Write specific triggering conditions
- ✅ Use `inherit` for model unless specific need
- ✅ Choose appropriate tools (least privilege)
- ✅ Write clear, structured system prompts
- ✅ Test agent triggering thoroughly
**DON'T:**
- ❌ Use generic descriptions without examples
- ❌ Omit triggering conditions
- ❌ Give all agents same color
- ❌ Grant unnecessary tool access
- ❌ Write vague system prompts
- ❌ Skip testing
## Additional Resources
### Reference Files
For detailed guidance, consult:
- **`references/system-prompt-design.md`** - Complete system prompt patterns
- **`references/triggering-examples.md`** - Example formats and best practices
- **`references/agent-creation-system-prompt.md`** - The exact prompt from Claude Code
### Example Files
Working examples in `examples/`:
- **`agent-creation-prompt.md`** - AI-assisted agent generation template
- **`complete-agent-examples.md`** - Full agent examples for different use cases
### Utility Scripts
Development tools in `scripts/`:
- **`validate-agent.sh`** - Validate agent file structure
- **`test-agent-trigger.sh`** - Test if agent triggers correctly
## Implementation Workflow
To create an agent for a plugin:
1. Define agent purpose and triggering conditions
2. Choose creation method (AI-assisted or manual)
3. Create `agents/agent-name.md` file
4. Write frontmatter with all required fields
5. Write system prompt following best practices
6. Include 2-4 triggering examples in description
7. Validate with `scripts/validate-agent.sh`
8. Test triggering with real scenarios
9. Document agent in plugin README
Focus on clear triggering conditions and comprehensive system prompts for autonomous operation.

View File

@@ -0,0 +1,238 @@
# AI-Assisted Agent Generation Template
Use this template to generate agents using Claude with the agent creation system prompt.
## Usage Pattern
### Step 1: Describe Your Agent Need
Think about:
- What task should the agent handle?
- When should it be triggered?
- Should it be proactive or reactive?
- What are the key responsibilities?
### Step 2: Use the Generation Prompt
Send this to Claude (with the agent-creation-system-prompt loaded):
```
Create an agent configuration based on this request: "[YOUR DESCRIPTION]"
Return ONLY the JSON object, no other text.
```
**Replace [YOUR DESCRIPTION] with your agent requirements.**
### Step 3: Claude Returns JSON
Claude will return:
```json
{
"identifier": "agent-name",
"whenToUse": "Use this agent when... Examples: <example>...</example>",
"systemPrompt": "You are... **Your Core Responsibilities:**..."
}
```
### Step 4: Convert to Agent File
Create `agents/[identifier].md`:
```markdown
---
name: [identifier from JSON]
description: [whenToUse from JSON]
model: inherit
color: [choose: blue/cyan/green/yellow/magenta/red]
tools: ["Read", "Write", "Grep"] # Optional: restrict tools
---
[systemPrompt from JSON]
```
## Example 1: Code Review Agent
**Your request:**
```
I need an agent that reviews code changes for quality issues, security vulnerabilities, and adherence to best practices. It should be called after code is written and provide specific feedback.
```
**Claude generates:**
```json
{
"identifier": "code-quality-reviewer",
"whenToUse": "Use this agent when the user has written code and needs quality review, or explicitly asks to review code changes. Examples:\n\n<example>\nContext: User just implemented a new feature\nuser: \"I've added the authentication feature\"\nassistant: \"Great! Let me review the code quality.\"\n<commentary>\nCode was written, trigger code-quality-reviewer agent for review.\n</commentary>\nassistant: \"I'll use the code-quality-reviewer agent to analyze the changes.\"\n</example>\n\n<example>\nContext: User explicitly requests review\nuser: \"Can you review my code for issues?\"\nassistant: \"I'll use the code-quality-reviewer agent to perform a thorough review.\"\n<commentary>\nExplicit review request triggers the agent.\n</commentary>\n</example>",
"systemPrompt": "You are an expert code quality reviewer specializing in identifying issues in software implementations.\n\n**Your Core Responsibilities:**\n1. Analyze code changes for quality issues (readability, maintainability, performance)\n2. Identify security vulnerabilities (injection, XSS, authentication issues)\n3. Check adherence to project best practices and coding standards\n4. Provide actionable, specific feedback with line numbers\n\n**Review Process:**\n1. Read the code changes using available tools\n2. Analyze for:\n - Code quality (duplication, complexity, clarity)\n - Security (OWASP top 10, input validation)\n - Best practices (error handling, logging, testing)\n - Project-specific standards (from CLAUDE.md)\n3. Identify issues with severity (critical/major/minor)\n4. Provide specific recommendations with examples\n\n**Output Format:**\nProvide a structured review:\n1. Summary (2-3 sentences)\n2. Critical Issues (must fix)\n3. Major Issues (should fix)\n4. Minor Issues (nice to fix)\n5. Positive observations\n6. Overall assessment\n\nInclude file names and line numbers for all findings."
}
```
**You create:**
File: `agents/code-quality-reviewer.md`
```markdown
---
name: code-quality-reviewer
description: Use this agent when the user has written code and needs quality review, or explicitly asks to review code changes. Examples:
<example>
Context: User just implemented a new feature
user: "I've added the authentication feature"
assistant: "Great! Let me review the code quality."
<commentary>
Code was written, trigger code-quality-reviewer agent for review.
</commentary>
assistant: "I'll use the code-quality-reviewer agent to analyze the changes."
</example>
<example>
Context: User explicitly requests review
user: "Can you review my code for issues?"
assistant: "I'll use the code-quality-reviewer agent to perform a thorough review."
<commentary>
Explicit review request triggers the agent.
</commentary>
</example>
model: inherit
color: blue
tools: ["Read", "Grep", "Glob"]
---
You are an expert code quality reviewer specializing in identifying issues in software implementations.
**Your Core Responsibilities:**
1. Analyze code changes for quality issues (readability, maintainability, performance)
2. Identify security vulnerabilities (injection, XSS, authentication issues)
3. Check adherence to project best practices and coding standards
4. Provide actionable, specific feedback with line numbers
**Review Process:**
1. Read the code changes using available tools
2. Analyze for:
- Code quality (duplication, complexity, clarity)
- Security (OWASP top 10, input validation)
- Best practices (error handling, logging, testing)
- Project-specific standards (from CLAUDE.md)
3. Identify issues with severity (critical/major/minor)
4. Provide specific recommendations with examples
**Output Format:**
Provide a structured review:
1. Summary (2-3 sentences)
2. Critical Issues (must fix)
3. Major Issues (should fix)
4. Minor Issues (nice to fix)
5. Positive observations
6. Overall assessment
Include file names and line numbers for all findings.
```
## Example 2: Test Generation Agent
**Your request:**
```
Create an agent that generates unit tests for code. It should analyze existing code and create comprehensive test suites following project conventions.
```
**Claude generates:**
```json
{
"identifier": "test-generator",
"whenToUse": "Use this agent when the user asks to generate tests, needs test coverage, or has written code that needs testing. Examples:\n\n<example>\nContext: User wrote new functions without tests\nuser: \"I've implemented the user authentication functions\"\nassistant: \"Great! Let me generate tests for these functions.\"\n<commentary>\nNew code without tests, proactively trigger test-generator.\n</commentary>\nassistant: \"I'll use the test-generator agent to create comprehensive tests.\"\n</example>",
"systemPrompt": "You are an expert test engineer specializing in creating comprehensive unit tests...\n\n**Your Core Responsibilities:**\n1. Analyze code to understand behavior\n2. Generate test cases covering happy paths and edge cases\n3. Follow project testing conventions\n4. Ensure high code coverage\n\n**Test Generation Process:**\n1. Read target code\n2. Identify testable units (functions, classes, methods)\n3. Design test cases (inputs, expected outputs, edge cases)\n4. Generate tests following project patterns\n5. Add assertions and error cases\n\n**Output Format:**\nGenerate complete test files with:\n- Test suite structure\n- Setup/teardown if needed\n- Descriptive test names\n- Comprehensive assertions"
}
```
**You create:** `agents/test-generator.md` with the structure above.
## Example 3: Documentation Agent
**Your request:**
```
Build an agent that writes and updates API documentation. It should analyze code and generate clear, comprehensive docs.
```
**Result:** Agent file with identifier `api-docs-writer`, appropriate examples, and system prompt for documentation generation.
## Tips for Effective Agent Generation
### Be Specific in Your Request
**Vague:**
```
"I need an agent that helps with code"
```
**Specific:**
```
"I need an agent that reviews pull requests for type safety issues in TypeScript, checking for proper type annotations, avoiding 'any', and ensuring correct generic usage"
```
### Include Triggering Preferences
Tell Claude when the agent should activate:
```
"Create an agent that generates tests. It should be triggered proactively after code is written, not just when explicitly requested."
```
### Mention Project Context
```
"Create a code review agent. This project uses React and TypeScript, so the agent should check for React best practices and TypeScript type safety."
```
### Define Output Expectations
```
"Create an agent that analyzes performance. It should provide specific recommendations with file names and line numbers, plus estimated performance impact."
```
## Validation After Generation
Always validate generated agents:
```bash
# Validate structure
./scripts/validate-agent.sh agents/your-agent.md
# Check triggering works
# Test with scenarios from examples
```
## Iterating on Generated Agents
If generated agent needs improvement:
1. Identify what's missing or wrong
2. Manually edit the agent file
3. Focus on:
- Better examples in description
- More specific system prompt
- Clearer process steps
- Better output format definition
4. Re-validate
5. Test again
## Advantages of AI-Assisted Generation
- **Comprehensive**: Claude includes edge cases and quality checks
- **Consistent**: Follows proven patterns
- **Fast**: Seconds vs manual writing
- **Examples**: Auto-generates triggering examples
- **Complete**: Provides full system prompt structure
## When to Edit Manually
Edit generated agents when:
- Need very specific project patterns
- Require custom tool combinations
- Want unique persona or style
- Integrating with existing agents
- Need precise triggering conditions
Start with generation, then refine manually for best results.

View File

@@ -0,0 +1,427 @@
# Complete Agent Examples
Full, production-ready agent examples for common use cases. Use these as templates for your own agents.
## Example 1: Code Review Agent
**File:** `agents/code-reviewer.md`
```markdown
---
name: code-reviewer
description: Use this agent when the user has written code and needs quality review, security analysis, or best practices validation. Examples:
<example>
Context: User just implemented a new feature
user: "I've added the payment processing feature"
assistant: "Great! Let me review the implementation."
<commentary>
Code written for payment processing (security-critical). Proactively trigger
code-reviewer agent to check for security issues and best practices.
</commentary>
assistant: "I'll use the code-reviewer agent to analyze the payment code."
</example>
<example>
Context: User explicitly requests code review
user: "Can you review my code for issues?"
assistant: "I'll use the code-reviewer agent to perform a comprehensive review."
<commentary>
Explicit code review request triggers the agent.
</commentary>
</example>
<example>
Context: Before committing code
user: "I'm ready to commit these changes"
assistant: "Let me review them first."
<commentary>
Before commit, proactively review code quality.
</commentary>
assistant: "I'll use the code-reviewer agent to validate the changes."
</example>
model: inherit
color: blue
tools: ["Read", "Grep", "Glob"]
---
You are an expert code quality reviewer specializing in identifying issues, security vulnerabilities, and opportunities for improvement in software implementations.
**Your Core Responsibilities:**
1. Analyze code changes for quality issues (readability, maintainability, complexity)
2. Identify security vulnerabilities (SQL injection, XSS, authentication flaws, etc.)
3. Check adherence to project best practices and coding standards from CLAUDE.md
4. Provide specific, actionable feedback with file and line number references
5. Recognize and commend good practices
**Code Review Process:**
1. **Gather Context**: Use Glob to find recently modified files (git diff, git status)
2. **Read Code**: Use Read tool to examine changed files
3. **Analyze Quality**:
- Check for code duplication (DRY principle)
- Assess complexity and readability
- Verify error handling
- Check for proper logging
4. **Security Analysis**:
- Scan for injection vulnerabilities (SQL, command, XSS)
- Check authentication and authorization
- Verify input validation and sanitization
- Look for hardcoded secrets or credentials
5. **Best Practices**:
- Follow project-specific standards from CLAUDE.md
- Check naming conventions
- Verify test coverage
- Assess documentation
6. **Categorize Issues**: Group by severity (critical/major/minor)
7. **Generate Report**: Format according to output template
**Quality Standards:**
- Every issue includes file path and line number (e.g., `src/auth.ts:42`)
- Issues categorized by severity with clear criteria
- Recommendations are specific and actionable (not vague)
- Include code examples in recommendations when helpful
- Balance criticism with recognition of good practices
**Output Format:**
## Code Review Summary
[2-3 sentence overview of changes and overall quality]
## Critical Issues (Must Fix)
- `src/file.ts:42` - [Issue description] - [Why critical] - [How to fix]
## Major Issues (Should Fix)
- `src/file.ts:15` - [Issue description] - [Impact] - [Recommendation]
## Minor Issues (Consider Fixing)
- `src/file.ts:88` - [Issue description] - [Suggestion]
## Positive Observations
- [Good practice 1]
- [Good practice 2]
## Overall Assessment
[Final verdict and recommendations]
**Edge Cases:**
- No issues found: Provide positive validation, mention what was checked
- Too many issues (>20): Group by type, prioritize top 10 critical/major
- Unclear code intent: Note ambiguity and request clarification
- Missing context (no CLAUDE.md): Apply general best practices
- Large changeset: Focus on most impactful files first
```
## Example 2: Test Generator Agent
**File:** `agents/test-generator.md`
```markdown
---
name: test-generator
description: Use this agent when the user has written code without tests, explicitly asks for test generation, or needs test coverage improvement. Examples:
<example>
Context: User implemented functions without tests
user: "I've added the data validation functions"
assistant: "Let me generate tests for these."
<commentary>
New code without tests. Proactively trigger test-generator agent.
</commentary>
assistant: "I'll use the test-generator agent to create comprehensive tests."
</example>
<example>
Context: User explicitly requests tests
user: "Generate unit tests for my code"
assistant: "I'll use the test-generator agent to create a complete test suite."
<commentary>
Direct test generation request triggers the agent.
</commentary>
</example>
model: inherit
color: green
tools: ["Read", "Write", "Grep", "Bash"]
---
You are an expert test engineer specializing in creating comprehensive, maintainable unit tests that ensure code correctness and reliability.
**Your Core Responsibilities:**
1. Generate high-quality unit tests with excellent coverage
2. Follow project testing conventions and patterns
3. Include happy path, edge cases, and error scenarios
4. Ensure tests are maintainable and clear
**Test Generation Process:**
1. **Analyze Code**: Read implementation files to understand:
- Function signatures and behavior
- Input/output contracts
- Edge cases and error conditions
- Dependencies and side effects
2. **Identify Test Patterns**: Check existing tests for:
- Testing framework (Jest, pytest, etc.)
- File organization (test/ directory, *.test.ts, etc.)
- Naming conventions
- Setup/teardown patterns
3. **Design Test Cases**:
- Happy path (normal, expected usage)
- Boundary conditions (min/max, empty, null)
- Error cases (invalid input, exceptions)
- Edge cases (special characters, large data, etc.)
4. **Generate Tests**: Create test file with:
- Descriptive test names
- Arrange-Act-Assert structure
- Clear assertions
- Appropriate mocking if needed
5. **Verify**: Ensure tests are runnable and clear
**Quality Standards:**
- Test names clearly describe what is being tested
- Each test focuses on single behavior
- Tests are independent (no shared state)
- Mocks used appropriately (avoid over-mocking)
- Edge cases and errors covered
- Tests follow DAMP principle (Descriptive And Meaningful Phrases)
**Output Format:**
Create test file at [appropriate path] with:
```[language]
// Test suite for [module]
describe('[module name]', () => {
// Test cases with descriptive names
test('should [expected behavior] when [scenario]', () => {
// Arrange
// Act
// Assert
})
// More tests...
})
```
**Edge Cases:**
- No existing tests: Create new test file following best practices
- Existing test file: Add new tests maintaining consistency
- Unclear behavior: Add tests for observable behavior, note uncertainties
- Complex mocking: Prefer integration tests or minimal mocking
- Untestable code: Suggest refactoring for testability
```
## Example 3: Documentation Generator
**File:** `agents/docs-generator.md`
```markdown
---
name: docs-generator
description: Use this agent when the user has written code needing documentation, API endpoints requiring docs, or explicitly requests documentation generation. Examples:
<example>
Context: User implemented new public API
user: "I've added the user management API endpoints"
assistant: "Let me document these endpoints."
<commentary>
New public API needs documentation. Proactively trigger docs-generator.
</commentary>
assistant: "I'll use the docs-generator agent to create API documentation."
</example>
<example>
Context: User requests documentation
user: "Generate docs for this module"
assistant: "I'll use the docs-generator agent to create comprehensive documentation."
<commentary>
Explicit documentation request triggers the agent.
</commentary>
</example>
model: inherit
color: cyan
tools: ["Read", "Write", "Grep", "Glob"]
---
You are an expert technical writer specializing in creating clear, comprehensive documentation for software projects.
**Your Core Responsibilities:**
1. Generate accurate, clear documentation from code
2. Follow project documentation standards
3. Include examples and usage patterns
4. Ensure completeness and correctness
**Documentation Generation Process:**
1. **Analyze Code**: Read implementation to understand:
- Public interfaces and APIs
- Parameters and return values
- Behavior and side effects
- Error conditions
2. **Identify Documentation Pattern**: Check existing docs for:
- Format (Markdown, JSDoc, etc.)
- Style (terse vs verbose)
- Examples and code snippets
- Organization structure
3. **Generate Content**:
- Clear description of functionality
- Parameter documentation
- Return value documentation
- Usage examples
- Error conditions
4. **Format**: Follow project conventions
5. **Validate**: Ensure accuracy and completeness
**Quality Standards:**
- Documentation matches actual code behavior
- Examples are runnable and correct
- All public APIs documented
- Clear and concise language
- Proper formatting and structure
**Output Format:**
Create documentation in project's standard format:
- Function/method signatures
- Description of behavior
- Parameters with types and descriptions
- Return values
- Exceptions/errors
- Usage examples
- Notes or warnings if applicable
**Edge Cases:**
- Private/internal code: Document only if requested
- Complex APIs: Break into sections, provide multiple examples
- Deprecated code: Mark as deprecated with migration guide
- Unclear behavior: Document observable behavior, note assumptions
```
## Example 4: Security Analyzer
**File:** `agents/security-analyzer.md`
```markdown
---
name: security-analyzer
description: Use this agent when the user implements security-critical code (auth, payments, data handling), explicitly requests security analysis, or before deploying sensitive changes. Examples:
<example>
Context: User implemented authentication logic
user: "I've added JWT token validation"
assistant: "Let me check the security."
<commentary>
Authentication code is security-critical. Proactively trigger security-analyzer.
</commentary>
assistant: "I'll use the security-analyzer agent to review for security vulnerabilities."
</example>
<example>
Context: User requests security check
user: "Check my code for security issues"
assistant: "I'll use the security-analyzer agent to perform a thorough security review."
<commentary>
Explicit security review request triggers the agent.
</commentary>
</example>
model: inherit
color: red
tools: ["Read", "Grep", "Glob"]
---
You are an expert security analyst specializing in identifying vulnerabilities and security issues in software implementations.
**Your Core Responsibilities:**
1. Identify security vulnerabilities (OWASP Top 10 and beyond)
2. Analyze authentication and authorization logic
3. Check input validation and sanitization
4. Verify secure data handling and storage
5. Provide specific remediation guidance
**Security Analysis Process:**
1. **Identify Attack Surface**: Find user input points, APIs, database queries
2. **Check Common Vulnerabilities**:
- Injection (SQL, command, XSS, etc.)
- Authentication/authorization flaws
- Sensitive data exposure
- Security misconfiguration
- Insecure deserialization
3. **Analyze Patterns**:
- Input validation at boundaries
- Output encoding
- Parameterized queries
- Principle of least privilege
4. **Assess Risk**: Categorize by severity and exploitability
5. **Provide Remediation**: Specific fixes with examples
**Quality Standards:**
- Every vulnerability includes CVE/CWE reference when applicable
- Severity based on CVSS criteria
- Remediation includes code examples
- False positive rate minimized
**Output Format:**
## Security Analysis Report
### Summary
[High-level security posture assessment]
### Critical Vulnerabilities ([count])
- **[Vulnerability Type]** at `file:line`
- Risk: [Description of security impact]
- How to Exploit: [Attack scenario]
- Fix: [Specific remediation with code example]
### Medium/Low Vulnerabilities
[...]
### Security Best Practices Recommendations
[...]
### Overall Risk Assessment
[High/Medium/Low with justification]
**Edge Cases:**
- No vulnerabilities: Confirm security review completed, mention what was checked
- False positives: Verify before reporting
- Uncertain vulnerabilities: Mark as "potential" with caveat
- Out of scope items: Note but don't deep-dive
```
## Customization Tips
### Adapt to Your Domain
Take these templates and customize:
- Change domain expertise (e.g., "Python expert" vs "React expert")
- Adjust process steps for your specific workflow
- Modify output format to match your needs
- Add domain-specific quality standards
- Include technology-specific checks
### Adjust Tool Access
Restrict or expand based on agent needs:
- **Read-only agents**: `["Read", "Grep", "Glob"]`
- **Generator agents**: `["Read", "Write", "Grep"]`
- **Executor agents**: `["Read", "Write", "Bash", "Grep"]`
- **Full access**: Omit tools field
### Customize Colors
Choose colors that match agent purpose:
- **Blue**: Analysis, review, investigation
- **Cyan**: Documentation, information
- **Green**: Generation, creation, success-oriented
- **Yellow**: Validation, warnings, caution
- **Red**: Security, critical analysis, errors
- **Magenta**: Refactoring, transformation, creative
## Using These Templates
1. Copy template that matches your use case
2. Replace placeholders with your specifics
3. Customize process steps for your domain
4. Adjust examples to your triggering scenarios
5. Validate with `scripts/validate-agent.sh`
6. Test triggering with real scenarios
7. Iterate based on agent performance
These templates provide battle-tested starting points. Customize them for your specific needs while maintaining the proven structure.

View File

@@ -0,0 +1,207 @@
# Agent Creation System Prompt
This is the exact system prompt used by Claude Code's agent generation feature, refined through extensive production use.
## The Prompt
```
You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability.
**Important Context**: You may have access to project-specific instructions from CLAUDE.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices.
When a user describes what they want an agent to do, you will:
1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from CLAUDE.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise.
2. **Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach.
3. **Architect Comprehensive Instructions**: Develop a system prompt that:
- Establishes clear behavioral boundaries and operational parameters
- Provides specific methodologies and best practices for task execution
- Anticipates edge cases and provides guidance for handling them
- Incorporates any specific requirements or preferences mentioned by the user
- Defines output format expectations when relevant
- Aligns with project-specific coding standards and patterns from CLAUDE.md
4. **Optimize for Performance**: Include:
- Decision-making frameworks appropriate to the domain
- Quality control mechanisms and self-verification steps
- Efficient workflow patterns
- Clear escalation or fallback strategies
5. **Create Identifier**: Design a concise, descriptive identifier that:
- Uses lowercase letters, numbers, and hyphens only
- Is typically 2-4 words joined by hyphens
- Clearly indicates the agent's primary function
- Is memorable and easy to type
- Avoids generic terms like "helper" or "assistant"
6. **Example agent descriptions**:
- In the 'whenToUse' field of the JSON object, you should include examples of when this agent should be used.
- Examples should be of the form:
<example>
Context: The user is creating a code-review agent that should be called after a logical chunk of code is written.
user: "Please write a function that checks if a number is prime"
assistant: "Here is the relevant function: "
<function call omitted for brevity only for this example>
<commentary>
Since a logical chunk of code was written and the task was completed, now use the code-reviewer agent to review the code.
</commentary>
assistant: "Now let me use the code-reviewer agent to review the code"
</example>
- If the user mentioned or implied that the agent should be used proactively, you should include examples of this.
- NOTE: Ensure that in the examples, you are making the assistant use the Agent tool and not simply respond directly to the task.
Your output must be a valid JSON object with exactly these fields:
{
"identifier": "A unique, descriptive identifier using lowercase letters, numbers, and hyphens (e.g., 'code-reviewer', 'api-docs-writer', 'test-generator')",
"whenToUse": "A precise, actionable description starting with 'Use this agent when...' that clearly defines the triggering conditions and use cases. Ensure you include examples as described above.",
"systemPrompt": "The complete system prompt that will govern the agent's behavior, written in second person ('You are...', 'You will...') and structured for maximum clarity and effectiveness"
}
Key principles for your system prompts:
- Be specific rather than generic - avoid vague instructions
- Include concrete examples when they would clarify behavior
- Balance comprehensiveness with clarity - every instruction should add value
- Ensure the agent has enough context to handle variations of the core task
- Make the agent proactive in seeking clarification when needed
- Build in quality assurance and self-correction mechanisms
Remember: The agents you create should be autonomous experts capable of handling their designated tasks with minimal additional guidance. Your system prompts are their complete operational manual.
```
## Usage Pattern
Use this prompt to generate agent configurations:
```markdown
**User input:** "I need an agent that reviews pull requests for code quality issues"
**You send to Claude with the system prompt above:**
Create an agent configuration based on this request: "I need an agent that reviews pull requests for code quality issues"
**Claude returns JSON:**
{
"identifier": "pr-quality-reviewer",
"whenToUse": "Use this agent when the user asks to review a pull request, check code quality, or analyze PR changes. Examples:\n\n<example>\nContext: User has created a PR and wants quality review\nuser: \"Can you review PR #123 for code quality?\"\nassistant: \"I'll use the pr-quality-reviewer agent to analyze the PR.\"\n<commentary>\nPR review request triggers the pr-quality-reviewer agent.\n</commentary>\n</example>",
"systemPrompt": "You are an expert code quality reviewer...\n\n**Your Core Responsibilities:**\n1. Analyze code changes for quality issues\n2. Check adherence to best practices\n..."
}
```
## Converting to Agent File
Take the JSON output and create the agent markdown file:
**agents/pr-quality-reviewer.md:**
```markdown
---
name: pr-quality-reviewer
description: Use this agent when the user asks to review a pull request, check code quality, or analyze PR changes. Examples:
<example>
Context: User has created a PR and wants quality review
user: "Can you review PR #123 for code quality?"
assistant: "I'll use the pr-quality-reviewer agent to analyze the PR."
<commentary>
PR review request triggers the pr-quality-reviewer agent.
</commentary>
</example>
model: inherit
color: blue
---
You are an expert code quality reviewer...
**Your Core Responsibilities:**
1. Analyze code changes for quality issues
2. Check adherence to best practices
...
```
## Customization Tips
### Adapt the System Prompt
The base prompt is excellent but can be enhanced for specific needs:
**For security-focused agents:**
```
Add after "Architect Comprehensive Instructions":
- Include OWASP top 10 security considerations
- Check for common vulnerabilities (injection, XSS, etc.)
- Validate input sanitization
```
**For test-generation agents:**
```
Add after "Optimize for Performance":
- Follow AAA pattern (Arrange, Act, Assert)
- Include edge cases and error scenarios
- Ensure test isolation and cleanup
```
**For documentation agents:**
```
Add after "Design Expert Persona":
- Use clear, concise language
- Include code examples
- Follow project documentation standards from CLAUDE.md
```
## Best Practices from Internal Implementation
### 1. Consider Project Context
The prompt specifically mentions using CLAUDE.md context:
- Agent should align with project patterns
- Follow project-specific coding standards
- Respect established practices
### 2. Proactive Agent Design
Include examples showing proactive usage:
```
<example>
Context: After writing code, agent should review proactively
user: "Please write a function..."
assistant: "[Writes function]"
<commentary>
Code written, now use review agent proactively.
</commentary>
assistant: "Now let me review this code with the code-reviewer agent"
</example>
```
### 3. Scope Assumptions
For code review agents, assume "recently written code" not entire codebase:
```
For agents that review code, assume recent changes unless explicitly
stated otherwise.
```
### 4. Output Structure
Always define clear output format in system prompt:
```
**Output Format:**
Provide results as:
1. Summary (2-3 sentences)
2. Detailed findings (bullet points)
3. Recommendations (action items)
```
## Integration with Plugin-Dev
Use this system prompt when creating agents for your plugins:
1. Take user request for agent functionality
2. Feed to Claude with this system prompt
3. Get JSON output (identifier, whenToUse, systemPrompt)
4. Convert to agent markdown file with frontmatter
5. Validate with agent validation rules
6. Test triggering conditions
7. Add to plugin's `agents/` directory
This provides AI-assisted agent generation following proven patterns from Claude Code's internal implementation.

View File

@@ -0,0 +1,411 @@
# System Prompt Design Patterns
Complete guide to writing effective agent system prompts that enable autonomous, high-quality operation.
## Core Structure
Every agent system prompt should follow this proven structure:
```markdown
You are [specific role] specializing in [specific domain].
**Your Core Responsibilities:**
1. [Primary responsibility - the main task]
2. [Secondary responsibility - supporting task]
3. [Additional responsibilities as needed]
**[Task Name] Process:**
1. [First concrete step]
2. [Second concrete step]
3. [Continue with clear steps]
[...]
**Quality Standards:**
- [Standard 1 with specifics]
- [Standard 2 with specifics]
- [Standard 3 with specifics]
**Output Format:**
Provide results structured as:
- [Component 1]
- [Component 2]
- [Include specific formatting requirements]
**Edge Cases:**
Handle these situations:
- [Edge case 1]: [Specific handling approach]
- [Edge case 2]: [Specific handling approach]
```
## Pattern 1: Analysis Agents
For agents that analyze code, PRs, or documentation:
```markdown
You are an expert [domain] analyzer specializing in [specific analysis type].
**Your Core Responsibilities:**
1. Thoroughly analyze [what] for [specific issues]
2. Identify [patterns/problems/opportunities]
3. Provide actionable recommendations
**Analysis Process:**
1. **Gather Context**: Read [what] using available tools
2. **Initial Scan**: Identify obvious [issues/patterns]
3. **Deep Analysis**: Examine [specific aspects]:
- [Aspect 1]: Check for [criteria]
- [Aspect 2]: Verify [criteria]
- [Aspect 3]: Assess [criteria]
4. **Synthesize Findings**: Group related issues
5. **Prioritize**: Rank by [severity/impact/urgency]
6. **Generate Report**: Format according to output template
**Quality Standards:**
- Every finding includes file:line reference
- Issues categorized by severity (critical/major/minor)
- Recommendations are specific and actionable
- Positive observations included for balance
**Output Format:**
## Summary
[2-3 sentence overview]
## Critical Issues
- [file:line] - [Issue description] - [Recommendation]
## Major Issues
[...]
## Minor Issues
[...]
## Recommendations
[...]
**Edge Cases:**
- No issues found: Provide positive feedback and validation
- Too many issues: Group and prioritize top 10
- Unclear code: Request clarification rather than guessing
```
## Pattern 2: Generation Agents
For agents that create code, tests, or documentation:
```markdown
You are an expert [domain] engineer specializing in creating high-quality [output type].
**Your Core Responsibilities:**
1. Generate [what] that meets [quality standards]
2. Follow [specific conventions/patterns]
3. Ensure [correctness/completeness/clarity]
**Generation Process:**
1. **Understand Requirements**: Analyze what needs to be created
2. **Gather Context**: Read existing [code/docs/tests] for patterns
3. **Design Structure**: Plan [architecture/organization/flow]
4. **Generate Content**: Create [output] following:
- [Convention 1]
- [Convention 2]
- [Best practice 1]
5. **Validate**: Verify [correctness/completeness]
6. **Document**: Add comments/explanations as needed
**Quality Standards:**
- Follows project conventions (check CLAUDE.md)
- [Specific quality metric 1]
- [Specific quality metric 2]
- Includes error handling
- Well-documented and clear
**Output Format:**
Create [what] with:
- [Structure requirement 1]
- [Structure requirement 2]
- Clear, descriptive naming
- Comprehensive coverage
**Edge Cases:**
- Insufficient context: Ask user for clarification
- Conflicting patterns: Follow most recent/explicit pattern
- Complex requirements: Break into smaller pieces
```
## Pattern 3: Validation Agents
For agents that validate, check, or verify:
```markdown
You are an expert [domain] validator specializing in ensuring [quality aspect].
**Your Core Responsibilities:**
1. Validate [what] against [criteria]
2. Identify violations and issues
3. Provide clear pass/fail determination
**Validation Process:**
1. **Load Criteria**: Understand validation requirements
2. **Scan Target**: Read [what] needs validation
3. **Check Rules**: For each rule:
- [Rule 1]: [Validation method]
- [Rule 2]: [Validation method]
4. **Collect Violations**: Document each failure with details
5. **Assess Severity**: Categorize issues
6. **Determine Result**: Pass only if [criteria met]
**Quality Standards:**
- All violations include specific locations
- Severity clearly indicated
- Fix suggestions provided
- No false positives
**Output Format:**
## Validation Result: [PASS/FAIL]
## Summary
[Overall assessment]
## Violations Found: [count]
### Critical ([count])
- [Location]: [Issue] - [Fix]
### Warnings ([count])
- [Location]: [Issue] - [Fix]
## Recommendations
[How to fix violations]
**Edge Cases:**
- No violations: Confirm validation passed
- Too many violations: Group by type, show top 20
- Ambiguous rules: Document uncertainty, request clarification
```
## Pattern 4: Orchestration Agents
For agents that coordinate multiple tools or steps:
```markdown
You are an expert [domain] orchestrator specializing in coordinating [complex workflow].
**Your Core Responsibilities:**
1. Coordinate [multi-step process]
2. Manage [resources/tools/dependencies]
3. Ensure [successful completion/integration]
**Orchestration Process:**
1. **Plan**: Understand full workflow and dependencies
2. **Prepare**: Set up prerequisites
3. **Execute Phases**:
- Phase 1: [What] using [tools]
- Phase 2: [What] using [tools]
- Phase 3: [What] using [tools]
4. **Monitor**: Track progress and handle failures
5. **Verify**: Confirm successful completion
6. **Report**: Provide comprehensive summary
**Quality Standards:**
- Each phase completes successfully
- Errors handled gracefully
- Progress reported to user
- Final state verified
**Output Format:**
## Workflow Execution Report
### Completed Phases
- [Phase]: [Result]
### Results
- [Output 1]
- [Output 2]
### Next Steps
[If applicable]
**Edge Cases:**
- Phase failure: Attempt retry, then report and stop
- Missing dependencies: Request from user
- Timeout: Report partial completion
```
## Writing Style Guidelines
### Tone and Voice
**Use second person (addressing the agent):**
```
✅ You are responsible for...
✅ You will analyze...
✅ Your process should...
❌ The agent is responsible for...
❌ This agent will analyze...
❌ I will analyze...
```
### Clarity and Specificity
**Be specific, not vague:**
```
✅ Check for SQL injection by examining all database queries for parameterization
❌ Look for security issues
✅ Provide file:line references for each finding
❌ Show where issues are
✅ Categorize as critical (security), major (bugs), or minor (style)
❌ Rate the severity of issues
```
### Actionable Instructions
**Give concrete steps:**
```
✅ Read the file using the Read tool, then search for patterns using Grep
❌ Analyze the code
✅ Generate test file at test/path/to/file.test.ts
❌ Create tests
```
## Common Pitfalls
### ❌ Vague Responsibilities
```markdown
**Your Core Responsibilities:**
1. Help the user with their code
2. Provide assistance
3. Be helpful
```
**Why bad:** Not specific enough to guide behavior.
### ✅ Specific Responsibilities
```markdown
**Your Core Responsibilities:**
1. Analyze TypeScript code for type safety issues
2. Identify missing type annotations and improper 'any' usage
3. Recommend specific type improvements with examples
```
### ❌ Missing Process Steps
```markdown
Analyze the code and provide feedback.
```
**Why bad:** Agent doesn't know HOW to analyze.
### ✅ Clear Process
```markdown
**Analysis Process:**
1. Read code files using Read tool
2. Scan for type annotations on all functions
3. Check for 'any' type usage
4. Verify generic type parameters
5. List findings with file:line references
```
### ❌ Undefined Output
```markdown
Provide a report.
```
**Why bad:** Agent doesn't know what format to use.
### ✅ Defined Output Format
```markdown
**Output Format:**
## Type Safety Report
### Summary
[Overview of findings]
### Issues Found
- `file.ts:42` - Missing return type on `processData`
- `utils.ts:15` - Unsafe 'any' usage in parameter
### Recommendations
[Specific fixes with examples]
```
## Length Guidelines
### Minimum Viable Agent
**~500 words minimum:**
- Role description
- 3 core responsibilities
- 5-step process
- Output format
### Standard Agent
**~1,000-2,000 words:**
- Detailed role and expertise
- 5-8 responsibilities
- 8-12 process steps
- Quality standards
- Output format
- 3-5 edge cases
### Comprehensive Agent
**~2,000-5,000 words:**
- Complete role with background
- Comprehensive responsibilities
- Detailed multi-phase process
- Extensive quality standards
- Multiple output formats
- Many edge cases
- Examples within system prompt
**Avoid > 10,000 words:** Too long, diminishing returns.
## Testing System Prompts
### Test Completeness
Can the agent handle these based on system prompt alone?
- [ ] Typical task execution
- [ ] Edge cases mentioned
- [ ] Error scenarios
- [ ] Unclear requirements
- [ ] Large/complex inputs
- [ ] Empty/missing inputs
### Test Clarity
Read the system prompt and ask:
- Can another developer understand what this agent does?
- Are process steps clear and actionable?
- Is output format unambiguous?
- Are quality standards measurable?
### Iterate Based on Results
After testing agent:
1. Identify where it struggled
2. Add missing guidance to system prompt
3. Clarify ambiguous instructions
4. Add process steps for edge cases
5. Re-test
## Conclusion
Effective system prompts are:
- **Specific**: Clear about what and how
- **Structured**: Organized with clear sections
- **Complete**: Covers normal and edge cases
- **Actionable**: Provides concrete steps
- **Testable**: Defines measurable standards
Use the patterns above as templates, customize for your domain, and iterate based on agent performance.

View File

@@ -0,0 +1,491 @@
# Agent Triggering Examples: Best Practices
Complete guide to writing effective `<example>` blocks in agent descriptions for reliable triggering.
## Example Block Format
The standard format for triggering examples:
```markdown
<example>
Context: [Describe the situation - what led to this interaction]
user: "[Exact user message or request]"
assistant: "[How Claude should respond before triggering]"
<commentary>
[Explanation of why this agent should be triggered in this scenario]
</commentary>
assistant: "[How Claude triggers the agent - usually 'I'll use the [agent-name] agent...']"
</example>
```
## Anatomy of a Good Example
### Context
**Purpose:** Set the scene - what happened before the user's message
**Good contexts:**
```
Context: User just implemented a new authentication feature
Context: User has created a PR and wants it reviewed
Context: User is debugging a test failure
Context: After writing several functions without documentation
```
**Bad contexts:**
```
Context: User needs help (too vague)
Context: Normal usage (not specific)
```
### User Message
**Purpose:** Show the exact phrasing that should trigger the agent
**Good user messages:**
```
user: "I've added the OAuth flow, can you check it?"
user: "Review PR #123"
user: "Why is this test failing?"
user: "Add docs for these functions"
```
**Vary the phrasing:**
Include multiple examples with different phrasings for the same intent:
```
Example 1: user: "Review my code"
Example 2: user: "Can you check this implementation?"
Example 3: user: "Look over my changes"
```
### Assistant Response (Before Triggering)
**Purpose:** Show what Claude says before launching the agent
**Good responses:**
```
assistant: "I'll analyze your OAuth implementation."
assistant: "Let me review that PR for you."
assistant: "I'll investigate the test failure."
```
**Proactive example:**
```
assistant: "Great! Now let me review the code quality."
<commentary>
Code was just written, proactively trigger review agent.
</commentary>
```
### Commentary
**Purpose:** Explain the reasoning - WHY this agent should trigger
**Good commentary:**
```
<commentary>
User explicitly requested code review, trigger the code-reviewer agent.
</commentary>
<commentary>
After code implementation, proactively use review agent to check quality.
</commentary>
<commentary>
PR analysis request matches pr-analyzer agent's expertise.
</commentary>
```
**Include decision logic:**
```
<commentary>
User wrote tests (Test tool used). The test-analyzer agent should check
test quality and coverage before continuing.
</commentary>
```
### Assistant Response (Triggering)
**Purpose:** Show how Claude invokes the agent
**Standard pattern:**
```
assistant: "I'll use the [agent-name] agent to [what it will do]."
```
**Examples:**
```
assistant: "I'll use the code-reviewer agent to analyze the changes."
assistant: "Let me use the test-generator agent to create comprehensive tests."
assistant: "I'll use the security-analyzer agent to check for vulnerabilities."
```
## Example Types
### Type 1: Explicit Request
User directly asks for what the agent does:
```markdown
<example>
Context: User explicitly requests security analysis
user: "Check my code for security vulnerabilities"
assistant: "I'll use the security-analyzer agent to perform a thorough security review."
<commentary>
Direct security analysis request triggers the security-analyzer agent.
</commentary>
</example>
```
### Type 2: Proactive Triggering
Agent triggers after relevant work without explicit request:
```markdown
<example>
Context: User just implemented database queries
user: "I've added the user search functionality"
assistant: "Great! Let me review it for security."
<commentary>
Database code written. Proactively trigger security-analyzer to check
for SQL injection and other database security issues.
</commentary>
assistant: "I'll use the security-analyzer agent to check the database code."
</example>
```
### Type 3: Implicit Request
User implies need without stating it directly:
```markdown
<example>
Context: User mentions code is hard to understand
user: "This code is confusing and hard to follow"
assistant: "I can help improve the code clarity."
<commentary>
User indicates complexity issues. Trigger code-simplifier agent to refactor
for better readability.
</commentary>
assistant: "I'll use the code-simplifier agent to suggest improvements."
</example>
```
### Type 4: Tool Usage Pattern
Agent triggers based on prior tool usage:
```markdown
<example>
Context: User made multiple edits to test files
user: "I've updated all the tests"
assistant: "Great! Let me verify test quality."
<commentary>
Multiple Edit tools used on test files. Proactively trigger test-quality-analyzer
to ensure tests follow best practices.
</commentary>
assistant: "I'll use the test-quality-analyzer agent to review the tests."
</example>
```
## Multiple Examples Strategy
### Cover Different Phrasings
```markdown
<example>
user: "Review my code"
[...]
</example>
<example>
user: "Can you check my implementation?"
[...]
</example>
<example>
user: "Look over these changes"
[...]
</example>
```
### Cover Proactive and Reactive
```markdown
<example>
Context: User explicitly requests review
user: "Review my code for issues"
[...]
</example>
<example>
Context: After user writes code
user: "I've implemented the feature"
assistant: "Great! Now let me review it."
<commentary>
Code written, proactively review.
</commentary>
[...]
</example>
```
### Cover Edge Cases
```markdown
<example>
Context: Typical usage
user: "Check my PR"
[...]
</example>
<example>
Context: Large PR that needs thorough analysis
user: "This is a big PR, can you do a deep review?"
[...]
<commentary>
Large PR mentioned, use thorough analysis mode.
</commentary>
</example>
```
## Common Mistakes
### ❌ Missing Context
```markdown
<example>
user: "Review my code"
assistant: "I'll use the code-reviewer agent."
</example>
```
**Why bad:** No context about what led to this request.
### ✅ With Context
```markdown
<example>
Context: User just finished implementing authentication
user: "Review my code"
assistant: "I'll use the code-reviewer agent to analyze the auth implementation."
<commentary>
Auth code written, review for security and best practices.
</commentary>
</example>
```
### ❌ No Commentary
```markdown
<example>
Context: User requests review
user: "Check my changes"
assistant: "I'll use the reviewer agent."
</example>
```
**Why bad:** Doesn't explain WHY agent triggers.
### ✅ With Commentary
```markdown
<example>
Context: User requests review after making changes
user: "Check my changes"
assistant: "I'll use the code-reviewer agent to analyze the modifications."
<commentary>
Code review request triggers agent to check quality, security, and adherence to standards.
</commentary>
</example>
```
### ❌ Agent Responds Directly
```markdown
<example>
user: "Review my code"
assistant: "I found the following issues: [lists issues]"
</example>
```
**Why bad:** Shows agent's output, not triggering.
### ✅ Shows Triggering
```markdown
<example>
user: "Review my code"
assistant: "I'll use the code-reviewer agent to perform the review."
<commentary>
Review request triggers the agent.
</commentary>
</example>
```
## How Many Examples?
### Minimum: 2 Examples
Cover at least:
1. Explicit request
2. One variation or proactive trigger
### Recommended: 3-4 Examples
Cover:
1. Explicit request (direct ask)
2. Implicit request (user implies need)
3. Proactive trigger (after relevant work)
4. Edge case or specific scenario
### Maximum: 6 Examples
More than 6 makes the description too long. Focus on the most important scenarios.
## Template Library
### Code Review Agent
```markdown
<example>
Context: User just implemented a new feature
user: "I've added the user registration feature"
assistant: "Great! Let me review the code quality."
<commentary>
Code written, proactively trigger code-reviewer agent.
</commentary>
assistant: "I'll use the code-reviewer agent to analyze the implementation."
</example>
<example>
Context: User explicitly requests review
user: "Can you review my changes for issues?"
assistant: "I'll use the code-reviewer agent to perform a thorough review."
<commentary>
Explicit review request triggers the agent.
</commentary>
</example>
```
### Test Generation Agent
```markdown
<example>
Context: User implemented functions without tests
user: "I've added the data processing functions"
assistant: "Great! Let me generate tests for these."
<commentary>
New code without tests, proactively trigger test-generator agent.
</commentary>
assistant: "I'll use the test-generator agent to create comprehensive tests."
</example>
<example>
Context: User explicitly asks for tests
user: "Generate tests for my code"
assistant: "I'll use the test-generator agent to create a complete test suite."
<commentary>
Direct test generation request triggers the agent.
</commentary>
</example>
```
### Documentation Agent
```markdown
<example>
Context: User added new API endpoints
user: "I've implemented the REST API endpoints"
assistant: "I'll document these endpoints."
<commentary>
New API code, proactively generate documentation.
</commentary>
assistant: "I'll use the api-docs-writer agent to create comprehensive documentation."
</example>
<example>
Context: User requests documentation
user: "Write docs for this API"
assistant: "I'll use the api-docs-writer agent to generate clear documentation."
<commentary>
Documentation request triggers the agent.
</commentary>
</example>
```
### Validation Agent
```markdown
<example>
Context: User ready to commit changes
user: "I'm ready to commit this code"
assistant: "Let me validate it first."
<commentary>
Before commit, proactively validate with validation-agent.
</commentary>
assistant: "I'll use the code-validator agent to check for issues."
</example>
<example>
Context: User asks for validation
user: "Validate my implementation"
assistant: "I'll use the code-validator agent to verify correctness."
<commentary>
Explicit validation request triggers the agent.
</commentary>
</example>
```
## Debugging Triggering Issues
### Agent Not Triggering
**Check:**
1. Examples include relevant keywords from user message
2. Context matches actual usage scenarios
3. Commentary explains triggering logic clearly
4. Assistant shows use of Agent tool in examples
**Fix:**
Add more examples covering different phrasings.
### Agent Triggers Too Often
**Check:**
1. Examples are too broad or generic
2. Triggering conditions overlap with other agents
3. Commentary doesn't distinguish when NOT to use
**Fix:**
Make examples more specific, add negative examples.
### Agent Triggers in Wrong Scenarios
**Check:**
1. Examples don't match actual intended use
2. Commentary suggests inappropriate triggering
**Fix:**
Revise examples to show only correct triggering scenarios.
## Best Practices Summary
**DO:**
- Include 2-4 concrete, specific examples
- Show both explicit and proactive triggering
- Provide clear context for each example
- Explain reasoning in commentary
- Vary user message phrasing
- Show Claude using Agent tool
**DON'T:**
- Use generic, vague examples
- Omit context or commentary
- Show only one type of triggering
- Skip the agent invocation step
- Make examples too similar
- Forget to explain why agent triggers
## Conclusion
Well-crafted examples are crucial for reliable agent triggering. Invest time in creating diverse, specific examples that clearly demonstrate when and why the agent should be used.

View File

@@ -0,0 +1,217 @@
#!/bin/bash
# Agent File Validator
#
# Validates an agent markdown file for correct structure and content:
#   - YAML frontmatter structure (opening/closing --- markers)
#   - Required fields: name, description, model, color
#   - Field formats and constraints
#   - System prompt presence and length
#   - Example blocks in the description
#
# Exit status: 0 when validation passes (warnings allowed), 1 on errors.
set -euo pipefail

# Usage
if [ $# -eq 0 ]; then
    echo "Usage: $0 <path/to/agent.md>"
    echo ""
    echo "Validates agent file for:"
    echo " - YAML frontmatter structure"
    echo " - Required fields (name, description, model, color)"
    echo " - Field formats and constraints"
    echo " - System prompt presence and length"
    echo " - Example blocks in description"
    exit 1
fi

AGENT_FILE="$1"
echo "🔍 Validating agent file: $AGENT_FILE"
echo ""

# Check 1: File exists
if [ ! -f "$AGENT_FILE" ]; then
    echo "❌ File not found: $AGENT_FILE"
    exit 1
fi
echo "✅ File exists"

# Check 2: Starts with ---
FIRST_LINE=$(head -1 "$AGENT_FILE")
if [ "$FIRST_LINE" != "---" ]; then
    echo "❌ File must start with YAML frontmatter (---)"
    exit 1
fi
echo "✅ Starts with frontmatter"

# Check 3: Has closing ---
if ! tail -n +2 "$AGENT_FILE" | grep -q '^---$'; then
    echo "❌ Frontmatter not closed (missing second ---)"
    exit 1
fi
echo "✅ Frontmatter properly closed"

# Extract frontmatter (lines between the first two --- markers) and the
# system prompt (everything after the second ---). Only the first two ---
# lines are treated as delimiters, so later horizontal rules (---) inside
# the system prompt are preserved rather than swallowed.
FRONTMATTER=$(awk '/^---$/ && n < 2 { n++; next } n == 1' "$AGENT_FILE")
SYSTEM_PROMPT=$(awk '/^---$/ && n < 2 { n++; next } n >= 2' "$AGENT_FILE")

# Check 4: Required fields
echo ""
echo "Checking required fields..."
error_count=0
warning_count=0

# NOTE: counters are bumped with `var=$((var + 1))` instead of ((var++)).
# Under `set -e`, ((var++)) exits the script the first time var is 0,
# because the arithmetic expression evaluates to 0 and the command "fails".
#
# NOTE: each field-extraction pipeline ends with `|| true`. Under
# `set -o pipefail`, grep exiting 1 on a missing field would otherwise
# abort the whole script before the "Missing required field" message
# could be printed; a missing field must be reported, not fatal.

# Check name field
NAME=$(echo "$FRONTMATTER" | grep '^name:' | sed 's/name: *//' | sed 's/^"\(.*\)"$/\1/' || true)
if [ -z "$NAME" ]; then
    echo "❌ Missing required field: name"
    error_count=$((error_count + 1))
else
    echo "✅ name: $NAME"
    # Validate name format: alphanumeric at both ends, hyphens allowed inside
    if ! [[ "$NAME" =~ ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$ ]]; then
        echo "❌ name must start/end with alphanumeric and contain only letters, numbers, hyphens"
        error_count=$((error_count + 1))
    fi
    # Validate name length (3-50 characters)
    name_length=${#NAME}
    if [ "$name_length" -lt 3 ]; then
        echo "❌ name too short (minimum 3 characters)"
        error_count=$((error_count + 1))
    elif [ "$name_length" -gt 50 ]; then
        echo "❌ name too long (maximum 50 characters)"
        error_count=$((error_count + 1))
    fi
    # Check for generic names (warning only)
    if [[ "$NAME" =~ ^(helper|assistant|agent|tool)$ ]]; then
        echo "⚠️ name is too generic: $NAME"
        warning_count=$((warning_count + 1))
    fi
fi

# Check description field (only the first description: line is examined)
DESCRIPTION=$(echo "$FRONTMATTER" | grep '^description:' | sed 's/description: *//' || true)
if [ -z "$DESCRIPTION" ]; then
    echo "❌ Missing required field: description"
    error_count=$((error_count + 1))
else
    desc_length=${#DESCRIPTION}
    echo "✅ description: ${desc_length} characters"
    if [ "$desc_length" -lt 10 ]; then
        echo "⚠️ description too short (minimum 10 characters recommended)"
        warning_count=$((warning_count + 1))
    elif [ "$desc_length" -gt 5000 ]; then
        echo "⚠️ description very long (over 5000 characters)"
        warning_count=$((warning_count + 1))
    fi
    # Check for example blocks
    if ! echo "$DESCRIPTION" | grep -q '<example>'; then
        echo "⚠️ description should include <example> blocks for triggering"
        warning_count=$((warning_count + 1))
    fi
    # Check for "Use this agent when" pattern
    if ! echo "$DESCRIPTION" | grep -qi 'use this agent when'; then
        echo "⚠️ description should start with 'Use this agent when...'"
        warning_count=$((warning_count + 1))
    fi
fi

# Check model field
MODEL=$(echo "$FRONTMATTER" | grep '^model:' | sed 's/model: *//' || true)
if [ -z "$MODEL" ]; then
    echo "❌ Missing required field: model"
    error_count=$((error_count + 1))
else
    echo "✅ model: $MODEL"
    case "$MODEL" in
        inherit|sonnet|opus|haiku)
            # Valid model
            ;;
        *)
            echo "⚠️ Unknown model: $MODEL (valid: inherit, sonnet, opus, haiku)"
            warning_count=$((warning_count + 1))
            ;;
    esac
fi

# Check color field
COLOR=$(echo "$FRONTMATTER" | grep '^color:' | sed 's/color: *//' || true)
if [ -z "$COLOR" ]; then
    echo "❌ Missing required field: color"
    error_count=$((error_count + 1))
else
    echo "✅ color: $COLOR"
    case "$COLOR" in
        blue|cyan|green|yellow|magenta|red)
            # Valid color
            ;;
        *)
            echo "⚠️ Unknown color: $COLOR (valid: blue, cyan, green, yellow, magenta, red)"
            warning_count=$((warning_count + 1))
            ;;
    esac
fi

# Check tools field (optional; absence means the agent can use all tools)
TOOLS=$(echo "$FRONTMATTER" | grep '^tools:' | sed 's/tools: *//' || true)
if [ -n "$TOOLS" ]; then
    echo "✅ tools: $TOOLS"
else
    echo "💡 tools: not specified (agent has access to all tools)"
fi

# Check 5: System prompt
echo ""
echo "Checking system prompt..."
if [ -z "$SYSTEM_PROMPT" ]; then
    echo "❌ System prompt is empty"
    error_count=$((error_count + 1))
else
    prompt_length=${#SYSTEM_PROMPT}
    echo "✅ System prompt: $prompt_length characters"
    if [ "$prompt_length" -lt 20 ]; then
        echo "❌ System prompt too short (minimum 20 characters)"
        error_count=$((error_count + 1))
    elif [ "$prompt_length" -gt 10000 ]; then
        echo "⚠️ System prompt very long (over 10,000 characters)"
        warning_count=$((warning_count + 1))
    fi
    # Check for second person voice
    if ! echo "$SYSTEM_PROMPT" | grep -q "You are\|You will\|Your"; then
        echo "⚠️ System prompt should use second person (You are..., You will...)"
        warning_count=$((warning_count + 1))
    fi
    # Suggestions (informational, not counted)
    if ! echo "$SYSTEM_PROMPT" | grep -qi "responsibilities\|process\|steps"; then
        echo "💡 Consider adding clear responsibilities or process steps"
    fi
    if ! echo "$SYSTEM_PROMPT" | grep -qi "output"; then
        echo "💡 Consider defining output format expectations"
    fi
fi

# Summary: errors fail validation; warnings alone still pass
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
if [ "$error_count" -eq 0 ] && [ "$warning_count" -eq 0 ]; then
    echo "✅ All checks passed!"
    exit 0
elif [ "$error_count" -eq 0 ]; then
    echo "⚠️ Validation passed with $warning_count warning(s)"
    exit 0
else
    echo "❌ Validation failed with $error_count error(s) and $warning_count warning(s)"
    exit 1
fi

View File

@@ -0,0 +1,272 @@
# Command Development Skill
Comprehensive guidance on creating Claude Code slash commands, including file format, frontmatter options, dynamic arguments, and best practices.
## Overview
This skill provides knowledge about:
- Slash command file format and structure
- YAML frontmatter configuration fields
- Dynamic arguments ($ARGUMENTS, $1, $2, etc.)
- File references with @ syntax
- Bash execution with !` syntax
- Command organization and namespacing
- Best practices for command development
- Plugin-specific features (${CLAUDE_PLUGIN_ROOT}, plugin patterns)
- Integration with plugin components (agents, skills, hooks)
- Validation patterns and error handling
## Skill Structure
### SKILL.md (~2,470 words)
Core skill content covering:
**Fundamentals:**
- Command basics and locations
- File format (Markdown with optional frontmatter)
- YAML frontmatter fields overview
- Dynamic arguments ($ARGUMENTS and positional)
- File references (@ syntax)
- Bash execution (!` syntax)
- Command organization patterns
- Best practices and common patterns
- Troubleshooting
**Plugin-Specific:**
- ${CLAUDE_PLUGIN_ROOT} environment variable
- Plugin command discovery and organization
- Plugin command patterns (configuration, template, multi-script)
- Integration with plugin components (agents, skills, hooks)
- Validation patterns (argument, file, resource, error handling)
### References
Detailed documentation:
- **frontmatter-reference.md**: Complete YAML frontmatter field specifications
- All field descriptions with types and defaults
- When to use each field
- Examples and best practices
- Validation and common errors
- **plugin-features-reference.md**: Plugin-specific command features
- Plugin command discovery and organization
- ${CLAUDE_PLUGIN_ROOT} environment variable usage
- Plugin command patterns (configuration, template, multi-script)
- Integration with plugin agents, skills, and hooks
- Validation patterns and error handling
### Examples
Practical command examples:
- **simple-commands.md**: 10 complete command examples
- Code review commands
- Testing commands
- Deployment commands
- Documentation generators
- Git integration commands
- Analysis and research commands
- **plugin-commands.md**: 10 plugin-specific command examples
- Simple plugin commands with scripts
- Multi-script workflows
- Template-based generation
- Configuration-driven deployment
- Agent and skill integration
- Multi-component workflows
- Validated input commands
- Environment-aware commands
## When This Skill Triggers
Claude Code activates this skill when users:
- Ask to "create a slash command" or "add a command"
- Need to "write a custom command"
- Want to "define command arguments"
- Ask about "command frontmatter" or YAML configuration
- Need to "organize commands" or use namespacing
- Want to create commands with file references
- Ask about "bash execution in commands"
- Need command development best practices
## Progressive Disclosure
The skill uses progressive disclosure:
1. **SKILL.md** (~2,470 words): Core concepts, common patterns, and plugin features overview
2. **References** (~13,500 words total): Detailed specifications
- frontmatter-reference.md (~1,200 words)
- plugin-features-reference.md (~1,800 words)
- interactive-commands.md (~2,500 words)
- advanced-workflows.md (~1,700 words)
- testing-strategies.md (~2,200 words)
- documentation-patterns.md (~2,000 words)
- marketplace-considerations.md (~2,200 words)
3. **Examples** (~6,000 words total): Complete working command examples
- simple-commands.md
- plugin-commands.md
Claude loads references and examples as needed based on task.
## Command Basics Quick Reference
### File Format
```markdown
---
description: Brief description
argument-hint: [arg1] [arg2]
allowed-tools: Read, Bash(git:*)
---
Command prompt content with:
- Arguments: $1, $2, or $ARGUMENTS
- Files: @path/to/file
- Bash: !`command here`
```
### Locations
- **Project**: `.claude/commands/` (shared with team)
- **Personal**: `~/.claude/commands/` (your commands)
- **Plugin**: `plugin-name/commands/` (plugin-specific)
### Key Features
**Dynamic arguments:**
- `$ARGUMENTS` - All arguments as single string
- `$1`, `$2`, `$3` - Positional arguments
**File references:**
- `@path/to/file` - Include file contents
**Bash execution:**
- `!`command`` - Execute and include output
## Frontmatter Fields Quick Reference
| Field | Purpose | Example |
|-------|---------|---------|
| `description` | Brief description for /help | `"Review code for issues"` |
| `allowed-tools` | Restrict tool access | `Read, Bash(git:*)` |
| `model` | Specify model | `sonnet`, `opus`, `haiku` |
| `argument-hint` | Document arguments | `[pr-number] [priority]` |
| `disable-model-invocation` | Manual-only command | `true` |
## Common Patterns
### Simple Review Command
```markdown
---
description: Review code for issues
---
Review this code for quality and potential bugs.
```
### Command with Arguments
```markdown
---
description: Deploy to environment
argument-hint: [environment] [version]
---
Deploy to $1 environment using version $2
```
### Command with File Reference
```markdown
---
description: Document file
argument-hint: [file-path]
---
Generate documentation for @$1
```
### Command with Bash Execution
```markdown
---
description: Show Git status
allowed-tools: Bash(git:*)
---
Current status: !`git status`
Recent commits: !`git log --oneline -5`
```
## Development Workflow
1. **Design command:**
- Define purpose and scope
- Determine required arguments
- Identify needed tools
2. **Create file:**
- Choose appropriate location
- Create `.md` file with command name
- Write basic prompt
3. **Add frontmatter:**
- Start minimal (just description)
- Add fields as needed (allowed-tools, etc.)
- Document arguments with argument-hint
4. **Test command:**
- Invoke with `/command-name`
- Verify arguments work
- Check bash execution
- Test file references
5. **Refine:**
- Improve prompt clarity
- Handle edge cases
- Add examples in comments
- Document requirements
## Best Practices Summary
1. **Single responsibility**: One command, one clear purpose
2. **Clear descriptions**: Make discoverable in `/help`
3. **Document arguments**: Always use argument-hint
4. **Minimal tools**: Use most restrictive allowed-tools
5. **Test thoroughly**: Verify all features work
6. **Add comments**: Explain complex logic
7. **Handle errors**: Consider missing arguments/files
## Status
**Completed enhancements:**
- ✓ Plugin command patterns (${CLAUDE_PLUGIN_ROOT}, discovery, organization)
- ✓ Integration patterns (agents, skills, hooks coordination)
- ✓ Validation patterns (input, file, resource validation, error handling)
**Remaining enhancements (in progress):**
- Advanced workflows (multi-step command sequences)
- Testing strategies (how to test commands effectively)
- Documentation patterns (command documentation best practices)
- Marketplace considerations (publishing and distribution)
## Maintenance
To update this skill:
1. Keep SKILL.md focused on core fundamentals
2. Move detailed specifications to references/
3. Add new examples/ for different use cases
4. Update frontmatter when new fields added
5. Ensure imperative/infinitive form throughout
6. Test examples work with current Claude Code
## Version History
**v0.1.0** (2025-01-15):
- Initial release with basic command fundamentals
- Frontmatter field reference
- 10 simple command examples
- Ready for plugin-specific pattern additions

View File

@@ -0,0 +1,833 @@
---
name: command-development
description: This skill should be used when the user asks to "create a slash command", "add a command", "write a custom command", "define command arguments", "use command frontmatter", "organize commands", "create command with file references", "interactive command", "use AskUserQuestion in command", or needs guidance on slash command structure, YAML frontmatter fields, dynamic arguments, bash execution in commands, user interaction patterns, or command development best practices for Claude Code.
---
# Command Development for Claude Code
## Overview
Slash commands are frequently-used prompts defined as Markdown files that Claude executes during interactive sessions. Understanding command structure, frontmatter options, and dynamic features enables creating powerful, reusable workflows.
**Key concepts:**
- Markdown file format for commands
- YAML frontmatter for configuration
- Dynamic arguments and file references
- Bash execution for context
- Command organization and namespacing
## Command Basics
### What is a Slash Command?
A slash command is a Markdown file containing a prompt that Claude executes when invoked. Commands provide:
- **Reusability**: Define once, use repeatedly
- **Consistency**: Standardize common workflows
- **Sharing**: Distribute across team or projects
- **Efficiency**: Quick access to complex prompts
### Critical: Commands are Instructions FOR Claude
**Commands are written for agent consumption, not human consumption.**
When a user invokes `/command-name`, the command content becomes Claude's instructions. Write commands as directives TO Claude about what to do, not as messages TO the user.
**Correct approach (instructions for Claude):**
```markdown
Review this code for security vulnerabilities including:
- SQL injection
- XSS attacks
- Authentication issues
Provide specific line numbers and severity ratings.
```
**Incorrect approach (messages to user):**
```markdown
This command will review your code for security issues.
You'll receive a report with vulnerability details.
```
The first example tells Claude what to do. The second tells the user what will happen but doesn't instruct Claude. Always use the first approach.
### Command Locations
**Project commands** (shared with team):
- Location: `.claude/commands/`
- Scope: Available in specific project
- Label: Shown as "(project)" in `/help`
- Use for: Team workflows, project-specific tasks
**Personal commands** (available everywhere):
- Location: `~/.claude/commands/`
- Scope: Available in all projects
- Label: Shown as "(user)" in `/help`
- Use for: Personal workflows, cross-project utilities
**Plugin commands** (bundled with plugins):
- Location: `plugin-name/commands/`
- Scope: Available when plugin installed
- Label: Shown as "(plugin-name)" in `/help`
- Use for: Plugin-specific functionality
## File Format
### Basic Structure
Commands are Markdown files with `.md` extension:
```
.claude/commands/
├── review.md # /review command
├── test.md # /test command
└── deploy.md # /deploy command
```
**Simple command:**
```markdown
Review this code for security vulnerabilities including:
- SQL injection
- XSS attacks
- Authentication bypass
- Insecure data handling
```
No frontmatter needed for basic commands.
### With YAML Frontmatter
Add configuration using YAML frontmatter:
```markdown
---
description: Review code for security issues
allowed-tools: Read, Grep, Bash(git:*)
model: sonnet
---
Review this code for security vulnerabilities...
```
## YAML Frontmatter Fields
### description
**Purpose:** Brief description shown in `/help`
**Type:** String
**Default:** First line of command prompt
```yaml
---
description: Review pull request for code quality
---
```
**Best practice:** Clear, actionable description (under 60 characters)
### allowed-tools
**Purpose:** Specify which tools command can use
**Type:** String or Array
**Default:** Inherits from conversation
```yaml
---
allowed-tools: Read, Write, Edit, Bash(git:*)
---
```
**Patterns:**
- `Read, Write, Edit` - Specific tools
- `Bash(git:*)` - Bash with git commands only
- `*` - All tools (rarely needed)
**Use when:** Command requires specific tool access
### model
**Purpose:** Specify model for command execution
**Type:** String (sonnet, opus, haiku)
**Default:** Inherits from conversation
```yaml
---
model: haiku
---
```
**Use cases:**
- `haiku` - Fast, simple commands
- `sonnet` - Standard workflows
- `opus` - Complex analysis
### argument-hint
**Purpose:** Document expected arguments for autocomplete
**Type:** String
**Default:** None
```yaml
---
argument-hint: [pr-number] [priority] [assignee]
---
```
**Benefits:**
- Helps users understand command arguments
- Improves command discovery
- Documents command interface
### disable-model-invocation
**Purpose:** Prevent SlashCommand tool from programmatically calling command
**Type:** Boolean
**Default:** false
```yaml
---
disable-model-invocation: true
---
```
**Use when:** Command should only be manually invoked
## Dynamic Arguments
### Using $ARGUMENTS
Capture all arguments as single string:
```markdown
---
description: Fix issue by number
argument-hint: [issue-number]
---
Fix issue #$ARGUMENTS following our coding standards and best practices.
```
**Usage:**
```
> /fix-issue 123
> /fix-issue 456
```
**Expands to:**
```
Fix issue #123 following our coding standards...
Fix issue #456 following our coding standards...
```
### Using Positional Arguments
Capture individual arguments with `$1`, `$2`, `$3`, etc.:
```markdown
---
description: Review PR with priority and assignee
argument-hint: [pr-number] [priority] [assignee]
---
Review pull request #$1 with priority level $2.
After review, assign to $3 for follow-up.
```
**Usage:**
```
> /review-pr 123 high alice
```
**Expands to:**
```
Review pull request #123 with priority level high.
After review, assign to alice for follow-up.
```
### Combining Arguments
Mix positional and remaining arguments:
```markdown
Deploy $1 to $2 environment with options: $3
```
**Usage:**
```
> /deploy api staging --force --skip-tests
```
**Expands to:**
```
Deploy api to staging environment with options: --force --skip-tests
```
## File References
### Using @ Syntax
Include file contents in command:
```markdown
---
description: Review specific file
argument-hint: [file-path]
---
Review @$1 for:
- Code quality
- Best practices
- Potential bugs
```
**Usage:**
```
> /review-file src/api/users.ts
```
**Effect:** Claude reads `src/api/users.ts` before processing command
### Multiple File References
Reference multiple files:
```markdown
Compare @src/old-version.js with @src/new-version.js
Identify:
- Breaking changes
- New features
- Bug fixes
```
### Static File References
Reference known files without arguments:
```markdown
Review @package.json and @tsconfig.json for consistency
Ensure:
- TypeScript version matches
- Dependencies are aligned
- Build configuration is correct
```
## Bash Execution in Commands
Commands can execute bash commands inline to dynamically gather context before Claude processes the command. This is useful for including repository state, environment information, or project-specific context.
**When to use:**
- Include dynamic context (git status, environment vars, etc.)
- Gather project/repository state
- Build context-aware workflows
**Implementation details:**
For complete syntax, examples, and best practices, see `references/plugin-features-reference.md` section on bash execution. The reference includes the exact syntax and multiple working examples to avoid execution issues.
## Command Organization
### Flat Structure
Simple organization for small command sets:
```
.claude/commands/
├── build.md
├── test.md
├── deploy.md
├── review.md
└── docs.md
```
**Use when:** 5-15 commands, no clear categories
### Namespaced Structure
Organize commands in subdirectories:
```
.claude/commands/
├── ci/
│ ├── build.md # /build (project:ci)
│ ├── test.md # /test (project:ci)
│ └── lint.md # /lint (project:ci)
├── git/
│ ├── commit.md # /commit (project:git)
│ └── pr.md # /pr (project:git)
└── docs/
├── generate.md # /generate (project:docs)
└── publish.md # /publish (project:docs)
```
**Benefits:**
- Logical grouping by category
- Namespace shown in `/help`
- Easier to find related commands
**Use when:** 15+ commands, clear categories
## Best Practices
### Command Design
1. **Single responsibility:** One command, one task
2. **Clear descriptions:** Self-explanatory in `/help`
3. **Explicit dependencies:** Use `allowed-tools` when needed
4. **Document arguments:** Always provide `argument-hint`
5. **Consistent naming:** Use verb-noun pattern (review-pr, fix-issue)
### Argument Handling
1. **Validate arguments:** Check for required arguments in prompt
2. **Provide defaults:** Suggest defaults when arguments missing
3. **Document format:** Explain expected argument format
4. **Handle edge cases:** Consider missing or invalid arguments
```markdown
---
argument-hint: [pr-number]
---
$IF($1,
Review PR #$1,
Please provide a PR number. Usage: /review-pr [number]
)
```
### File References
1. **Explicit paths:** Use clear file paths
2. **Check existence:** Handle missing files gracefully
3. **Relative paths:** Use project-relative paths
4. **Glob support:** Consider using Glob tool for patterns
### Bash Commands
1. **Limit scope:** Use `Bash(git:*)` not `Bash(*)`
2. **Safe commands:** Avoid destructive operations
3. **Handle errors:** Consider command failures
4. **Keep fast:** Long-running commands slow invocation
### Documentation
1. **Add comments:** Explain complex logic
2. **Provide examples:** Show usage in comments
3. **List requirements:** Document dependencies
4. **Version commands:** Note breaking changes
```markdown
---
description: Deploy application to environment
argument-hint: [environment] [version]
---
<!--
Usage: /deploy [staging|production] [version]
Requires: AWS credentials configured
Example: /deploy staging v1.2.3
-->
Deploy application to $1 environment using version $2...
```
## Common Patterns
### Review Pattern
```markdown
---
description: Review code changes
allowed-tools: Read, Bash(git:*)
---
Files changed: !`git diff --name-only`
Review each file for:
1. Code quality and style
2. Potential bugs or issues
3. Test coverage
4. Documentation needs
Provide specific feedback for each file.
```
### Testing Pattern
```markdown
---
description: Run tests for specific file
argument-hint: [test-file]
allowed-tools: Bash(npm:*)
---
Run tests: !`npm test $1`
Analyze results and suggest fixes for failures.
```
### Documentation Pattern
```markdown
---
description: Generate documentation for file
argument-hint: [source-file]
---
Generate comprehensive documentation for @$1 including:
- Function/class descriptions
- Parameter documentation
- Return value descriptions
- Usage examples
- Edge cases and errors
```
### Workflow Pattern
```markdown
---
description: Complete PR workflow
argument-hint: [pr-number]
allowed-tools: Bash(gh:*), Read
---
PR #$1 Workflow:
1. Fetch PR: !`gh pr view $1`
2. Review changes
3. Run checks
4. Approve or request changes
```
## Troubleshooting
**Command not appearing:**
- Check file is in correct directory
- Verify `.md` extension present
- Ensure valid Markdown format
- Restart Claude Code
**Arguments not working:**
- Verify `$1`, `$2` syntax correct
- Check `argument-hint` matches usage
- Ensure no extra spaces
**Bash execution failing:**
- Check `allowed-tools` includes Bash
- Verify command syntax in backticks
- Test command in terminal first
- Check for required permissions
**File references not working:**
- Verify `@` syntax correct
- Check file path is valid
- Ensure Read tool allowed
- Use absolute or project-relative paths
## Plugin-Specific Features
### CLAUDE_PLUGIN_ROOT Variable
Plugin commands have access to `${CLAUDE_PLUGIN_ROOT}`, an environment variable that resolves to the plugin's absolute path.
**Purpose:**
- Reference plugin files portably
- Execute plugin scripts
- Load plugin configuration
- Access plugin templates
**Basic usage:**
```markdown
---
description: Analyze using plugin script
allowed-tools: Bash(node:*)
---
Run analysis: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js $1`
Review results and report findings.
```
**Common patterns:**
```markdown
# Execute plugin script
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/script.sh`
# Load plugin configuration
@${CLAUDE_PLUGIN_ROOT}/config/settings.json
# Use plugin template
@${CLAUDE_PLUGIN_ROOT}/templates/report.md
# Access plugin resources
@${CLAUDE_PLUGIN_ROOT}/docs/reference.md
```
**Why use it:**
- Works across all installations
- Portable between systems
- No hardcoded paths needed
- Essential for multi-file plugins
### Plugin Command Organization
Plugin commands are discovered automatically from the `commands/` directory:
```
plugin-name/
├── commands/
│ ├── foo.md # /foo (plugin:plugin-name)
│ ├── bar.md # /bar (plugin:plugin-name)
│ └── utils/
│ └── helper.md # /helper (plugin:plugin-name:utils)
└── plugin.json
```
**Namespace benefits:**
- Logical command grouping
- Shown in `/help` output
- Avoid name conflicts
- Organize related commands
**Naming conventions:**
- Use descriptive action names
- Avoid generic names (test, run)
- Consider plugin-specific prefix
- Use hyphens for multi-word names
### Plugin Command Patterns
**Configuration-based pattern:**
```markdown
---
description: Deploy using plugin configuration
argument-hint: [environment]
allowed-tools: Read, Bash(*)
---
Load configuration: @${CLAUDE_PLUGIN_ROOT}/config/$1-deploy.json
Deploy to $1 using configuration settings.
Monitor deployment and report status.
```
**Template-based pattern:**
```markdown
---
description: Generate docs from template
argument-hint: [component]
---
Template: @${CLAUDE_PLUGIN_ROOT}/templates/docs.md
Generate documentation for $1 following template structure.
```
**Multi-script pattern:**
```markdown
---
description: Complete build workflow
allowed-tools: Bash(*)
---
Build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh`
Test: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test.sh`
Package: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/package.sh`
Review outputs and report workflow status.
```
**See `references/plugin-features-reference.md` for detailed patterns.**
## Integration with Plugin Components
Commands can integrate with other plugin components for powerful workflows.
### Agent Integration
Launch plugin agents for complex tasks:
```markdown
---
description: Deep code review
argument-hint: [file-path]
---
Initiate comprehensive review of @$1 using the code-reviewer agent.
The agent will analyze:
- Code structure
- Security issues
- Performance
- Best practices
Agent uses plugin resources:
- ${CLAUDE_PLUGIN_ROOT}/config/rules.json
- ${CLAUDE_PLUGIN_ROOT}/checklists/review.md
```
**Key points:**
- Agent must exist in `plugin/agents/` directory
- Claude uses Task tool to launch agent
- Document agent capabilities
- Reference plugin resources agent uses
### Skill Integration
Leverage plugin skills for specialized knowledge:
```markdown
---
description: Document API with standards
argument-hint: [api-file]
---
Document API in @$1 following plugin standards.
Use the api-docs-standards skill to ensure:
- Complete endpoint documentation
- Consistent formatting
- Example quality
- Error documentation
Generate production-ready API docs.
```
**Key points:**
- Skill must exist in `plugin/skills/` directory
- Mention skill name to trigger invocation
- Document skill purpose
- Explain what skill provides
### Hook Coordination
Design commands that work with plugin hooks:
- Commands can prepare state for hooks to process
- Hooks execute automatically on tool events
- Commands should document expected hook behavior
- Guide Claude on interpreting hook output
See `references/plugin-features-reference.md` for examples of commands that coordinate with hooks.
### Multi-Component Workflows
Combine agents, skills, and scripts:
```markdown
---
description: Comprehensive review workflow
argument-hint: [file]
allowed-tools: Bash(node:*), Read
---
Target: @$1
Phase 1 - Static Analysis:
!`node ${CLAUDE_PLUGIN_ROOT}/scripts/lint.js $1`
Phase 2 - Deep Review:
Launch code-reviewer agent for detailed analysis.
Phase 3 - Standards Check:
Use coding-standards skill for validation.
Phase 4 - Report:
Template: @${CLAUDE_PLUGIN_ROOT}/templates/review.md
Compile findings into report following template.
```
**When to use:**
- Complex multi-step workflows
- Leverage multiple plugin capabilities
- Require specialized analysis
- Need structured outputs
## Validation Patterns
Commands should validate inputs and resources before processing.
### Argument Validation
```markdown
---
description: Deploy with validation
argument-hint: [environment]
---
Validate environment: !`echo "$1" | grep -E "^(dev|staging|prod)$" || echo "INVALID"`
If $1 is valid environment:
Deploy to $1
Otherwise:
Explain valid environments: dev, staging, prod
Show usage: /deploy [environment]
```
### File Existence Checks
```markdown
---
description: Process configuration
argument-hint: [config-file]
---
Check file exists: !`test -f $1 && echo "EXISTS" || echo "MISSING"`
If file exists:
Process configuration: @$1
Otherwise:
Explain where to place config file
Show expected format
Provide example configuration
```
### Plugin Resource Validation
```markdown
---
description: Run plugin analyzer
allowed-tools: Bash(test:*)
---
Validate plugin setup:
- Script: !`test -x ${CLAUDE_PLUGIN_ROOT}/bin/analyze && echo "✓" || echo "✗"`
- Config: !`test -f ${CLAUDE_PLUGIN_ROOT}/config.json && echo "✓" || echo "✗"`
If all checks pass, run analysis.
Otherwise, report missing components.
```
### Error Handling
```markdown
---
description: Build with error handling
allowed-tools: Bash(*)
---
Execute build: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh 2>&1 || echo "BUILD_FAILED"`
If build succeeded:
Report success and output location
If build failed:
Analyze error output
Suggest likely causes
Provide troubleshooting steps
```
**Best practices:**
- Validate early in command
- Provide helpful error messages
- Suggest corrective actions
- Handle edge cases gracefully
---
For detailed frontmatter field specifications, see `references/frontmatter-reference.md`.
For plugin-specific features and patterns, see `references/plugin-features-reference.md`.
For command pattern examples, see `examples/` directory.

View File

@@ -0,0 +1,557 @@
# Plugin Command Examples
Practical examples of commands designed for Claude Code plugins, demonstrating plugin-specific patterns and features.
## Table of Contents
1. [Simple Plugin Command](#1-simple-plugin-command)
2. [Script-Based Analysis](#2-script-based-analysis)
3. [Template-Based Generation](#3-template-based-generation)
4. [Multi-Script Workflow](#4-multi-script-workflow)
5. [Configuration-Driven Deployment](#5-configuration-driven-deployment)
6. [Agent Integration](#6-agent-integration)
7. [Skill Integration](#7-skill-integration)
8. [Multi-Component Workflow](#8-multi-component-workflow)
9. [Validated Input Command](#9-validated-input-command)
10. [Environment-Aware Command](#10-environment-aware-command)
---
## 1. Simple Plugin Command
**Use case:** Basic command that uses plugin script
**File:** `commands/analyze.md`
```markdown
---
description: Analyze code quality using plugin tools
argument-hint: [file-path]
allowed-tools: Bash(node:*), Read
---
Analyze @$1 using plugin's quality checker:
!`node ${CLAUDE_PLUGIN_ROOT}/scripts/quality-check.js $1`
Review the analysis output and provide:
1. Summary of findings
2. Priority issues to address
3. Suggested improvements
4. Code quality score interpretation
```
**Key features:**
- Uses `${CLAUDE_PLUGIN_ROOT}` for portable path
- Combines file reference with script execution
- Simple single-purpose command
---
## 2. Script-Based Analysis
**Use case:** Run comprehensive analysis using multiple plugin scripts
**File:** `commands/full-audit.md`
```markdown
---
description: Complete code audit using plugin suite
argument-hint: [directory]
allowed-tools: Bash(*)
model: sonnet
---
Running complete audit on $1:
**Security scan:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/security-scan.sh $1`
**Performance analysis:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/perf-analyze.sh $1`
**Best practices check:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/best-practices.sh $1`
Analyze all results and create comprehensive report including:
- Critical issues requiring immediate attention
- Performance optimization opportunities
- Security vulnerabilities and fixes
- Overall health score and recommendations
```
**Key features:**
- Multiple script executions
- Organized output sections
- Comprehensive workflow
- Clear reporting structure
---
## 3. Template-Based Generation
**Use case:** Generate documentation following plugin template
**File:** `commands/gen-api-docs.md`
```markdown
---
description: Generate API documentation from template
argument-hint: [api-file]
---
Template structure: @${CLAUDE_PLUGIN_ROOT}/templates/api-documentation.md
API implementation: @$1
Generate complete API documentation following the template format above.
Ensure documentation includes:
- Endpoint descriptions with HTTP methods
- Request/response schemas
- Authentication requirements
- Error codes and handling
- Usage examples with curl commands
- Rate limiting information
Format output as markdown suitable for README or docs site.
```
**Key features:**
- Uses plugin template
- Combines template with source file
- Standardized output format
- Clear documentation structure
---
## 4. Multi-Script Workflow
**Use case:** Orchestrate build, test, and deploy workflow
**File:** `commands/release.md`
```markdown
---
description: Execute complete release workflow
argument-hint: [version]
allowed-tools: Bash(*), Read
---
Executing release workflow for version $1:
**Step 1 - Pre-release validation:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/pre-release-check.sh $1`
**Step 2 - Build artifacts:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build-release.sh $1`
**Step 3 - Run test suite:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/run-tests.sh`
**Step 4 - Package release:**
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/package.sh $1`
Review all step outputs and report:
1. Any failures or warnings
2. Build artifacts location
3. Test results summary
4. Next steps for deployment
5. Rollback plan if needed
```
**Key features:**
- Multi-step workflow
- Sequential script execution
- Clear step numbering
- Comprehensive reporting
---
## 5. Configuration-Driven Deployment
**Use case:** Deploy using environment-specific plugin configuration
**File:** `commands/deploy.md`
```markdown
---
description: Deploy application to environment
argument-hint: [environment]
allowed-tools: Read, Bash(*)
---
Deployment configuration for $1: @${CLAUDE_PLUGIN_ROOT}/config/$1-deploy.json
Current git state: !`git rev-parse --short HEAD`
Build info: !`cat package.json | grep -E '(name|version)'`
Execute deployment to $1 environment using configuration above.
Deployment checklist:
1. Validate configuration settings
2. Build application for $1
3. Run pre-deployment tests
4. Deploy to target environment
5. Run smoke tests
6. Verify deployment success
7. Update deployment log
Report deployment status and any issues encountered.
```
**Key features:**
- Environment-specific configuration
- Dynamic config file loading
- Pre-deployment validation
- Structured checklist
---
## 6. Agent Integration
**Use case:** Command that launches plugin agent for complex task
**File:** `commands/deep-review.md`
```markdown
---
description: Deep code review using plugin agent
argument-hint: [file-or-directory]
---
Initiate comprehensive code review of @$1 using the code-reviewer agent.
The agent will perform:
1. **Static analysis** - Check for code smells and anti-patterns
2. **Security audit** - Identify potential vulnerabilities
3. **Performance review** - Find optimization opportunities
4. **Best practices** - Ensure code follows standards
5. **Documentation check** - Verify adequate documentation
The agent has access to:
- Plugin's linting rules: ${CLAUDE_PLUGIN_ROOT}/config/lint-rules.json
- Security checklist: ${CLAUDE_PLUGIN_ROOT}/checklists/security.md
- Performance guidelines: ${CLAUDE_PLUGIN_ROOT}/docs/performance.md
Note: This uses the Task tool to launch the plugin's code-reviewer agent for thorough analysis.
```
**Key features:**
- Delegates to plugin agent
- Documents agent capabilities
- References plugin resources
- Clear scope definition
---
## 7. Skill Integration
**Use case:** Command that leverages plugin skill for specialized knowledge
**File:** `commands/document-api.md`
```markdown
---
description: Document API following plugin standards
argument-hint: [api-file]
---
API source code: @$1
Generate API documentation following the plugin's API documentation standards.
Use the api-documentation-standards skill to ensure:
- **OpenAPI compliance** - Follow OpenAPI 3.0 specification
- **Consistent formatting** - Use plugin's documentation style
- **Complete coverage** - Document all endpoints and schemas
- **Example quality** - Provide realistic usage examples
- **Error documentation** - Cover all error scenarios
The skill provides:
- Standard documentation templates
- API documentation best practices
- Common patterns for this codebase
- Quality validation criteria
Generate production-ready API documentation.
```
**Key features:**
- Invokes plugin skill by name
- Documents skill purpose
- Clear expectations
- Leverages skill knowledge
---
## 8. Multi-Component Workflow
**Use case:** Complex workflow using agents, skills, and scripts
**File:** `commands/complete-review.md`
```markdown
---
description: Comprehensive review using all plugin components
argument-hint: [file-path]
allowed-tools: Bash(node:*), Read
---
Target file: @$1
Execute comprehensive review workflow:
**Phase 1: Automated Analysis**
Run plugin analyzer: !`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js $1`
**Phase 2: Deep Review (Agent)**
Launch the code-quality-reviewer agent for detailed analysis.
Agent will examine:
- Code structure and organization
- Error handling patterns
- Testing coverage
- Documentation quality
**Phase 3: Standards Check (Skill)**
Use the coding-standards skill to validate:
- Naming conventions
- Code formatting
- Best practices adherence
- Framework-specific patterns
**Phase 4: Report Generation**
Template: @${CLAUDE_PLUGIN_ROOT}/templates/review-report.md
Compile all findings into comprehensive report following template.
**Phase 5: Recommendations**
Generate prioritized action items:
1. Critical issues (must fix)
2. Important improvements (should fix)
3. Nice-to-have enhancements (could fix)
Include specific file locations and suggested changes for each item.
```
**Key features:**
- Multi-phase workflow
- Combines scripts, agents, skills
- Template-based reporting
- Prioritized outputs
---
## 9. Validated Input Command
**Use case:** Command with input validation and error handling
**File:** `commands/build-env.md`
```markdown
---
description: Build for specific environment with validation
argument-hint: [environment]
allowed-tools: Bash(*)
---
Validate environment argument: !`echo "$1" | grep -E "^(dev|staging|prod)$" && echo "VALID" || echo "INVALID"`
Check build script exists: !`test -x ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh && echo "EXISTS" || echo "MISSING"`
Verify configuration available: !`test -f ${CLAUDE_PLUGIN_ROOT}/config/$1.json && echo "FOUND" || echo "NOT_FOUND"`
If all validations pass:
**Configuration:** @${CLAUDE_PLUGIN_ROOT}/config/$1.json
**Execute build:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/build.sh $1 2>&1`
**Validation results:** !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/validate-build.sh $1 2>&1`
Report build status and any issues.
If validations fail:
- Explain which validation failed
- Provide expected values/locations
- Suggest corrective actions
- Document troubleshooting steps
```
**Key features:**
- Input validation
- Resource existence checks
- Error handling
- Helpful error messages
- Graceful failure handling
---
## 10. Environment-Aware Command
**Use case:** Command that adapts behavior based on environment
**File:** `commands/run-checks.md`
```markdown
---
description: Run environment-appropriate checks
argument-hint: [environment]
allowed-tools: Bash(*), Read
---
Environment: $1
Load environment configuration: @${CLAUDE_PLUGIN_ROOT}/config/$1-checks.json
Determine check level: !`echo "$1" | grep -E "^prod$" && echo "FULL" || echo "BASIC"`
**For production environment:**
- Full test suite: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test-full.sh`
- Security scan: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/security-scan.sh`
- Performance audit: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/perf-check.sh`
- Compliance check: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/compliance.sh`
**For non-production environments:**
- Basic tests: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/test-basic.sh`
- Quick lint: !`bash ${CLAUDE_PLUGIN_ROOT}/scripts/lint.sh`
Analyze results based on environment requirements:
**Production:** All checks must pass with zero critical issues
**Staging:** No critical issues, warnings acceptable
**Development:** Focus on blocking issues only
Report status and recommend proceed/block decision.
```
**Key features:**
- Environment-aware logic
- Conditional execution
- Different validation levels
- Appropriate reporting per environment
---
## Common Patterns Summary
### Pattern: Plugin Script Execution
```markdown
!`node ${CLAUDE_PLUGIN_ROOT}/scripts/script-name.js $1`
```
Use for: Running plugin-provided Node.js scripts
### Pattern: Plugin Configuration Loading
```markdown
@${CLAUDE_PLUGIN_ROOT}/config/config-name.json
```
Use for: Loading plugin configuration files
### Pattern: Plugin Template Usage
```markdown
@${CLAUDE_PLUGIN_ROOT}/templates/template-name.md
```
Use for: Using plugin templates for generation
### Pattern: Agent Invocation
```markdown
Launch the [agent-name] agent for [task description].
```
Use for: Delegating complex tasks to plugin agents
### Pattern: Skill Reference
```markdown
Use the [skill-name] skill to ensure [requirements].
```
Use for: Leveraging plugin skills for specialized knowledge
### Pattern: Input Validation
```markdown
Validate input: !`echo "$1" | grep -E "^pattern$" && echo "OK" || echo "ERROR"`
```
Use for: Validating command arguments
### Pattern: Resource Validation
```markdown
Check exists: !`test -f ${CLAUDE_PLUGIN_ROOT}/path/file && echo "YES" || echo "NO"`
```
Use for: Verifying required plugin files exist
---
## Development Tips
### Testing Plugin Commands
1. **Test with plugin installed:**
```bash
cd /path/to/plugin
claude /command-name args
```
2. **Verify ${CLAUDE_PLUGIN_ROOT} expansion:**
```bash
# Add debug output to command
!`echo "Plugin root: ${CLAUDE_PLUGIN_ROOT}"`
```
3. **Test across different working directories:**
```bash
cd /tmp && claude /command-name
cd /other/project && claude /command-name
```
4. **Validate resource availability:**
```bash
# Check all plugin resources exist
!`ls -la ${CLAUDE_PLUGIN_ROOT}/scripts/`
!`ls -la ${CLAUDE_PLUGIN_ROOT}/config/`
```
### Common Mistakes to Avoid
1. **Using relative paths instead of ${CLAUDE_PLUGIN_ROOT}:**
```markdown
# Wrong
!`node ./scripts/analyze.js`
# Correct
!`node ${CLAUDE_PLUGIN_ROOT}/scripts/analyze.js`
```
2. **Forgetting to allow required tools:**
```markdown
# Missing allowed-tools
!`bash script.sh` # Will fail without Bash permission
# Correct
---
allowed-tools: Bash(*)
---
!`bash ${CLAUDE_PLUGIN_ROOT}/scripts/script.sh`
```
3. **Not validating inputs:**
```markdown
# Risky - no validation
Deploy to $1 environment
# Better - with validation
Validate: !`echo "$1" | grep -E "^(dev|staging|prod)$" || echo "INVALID"`
Deploy to $1 environment (if valid)
```
4. **Hardcoding plugin paths:**
```markdown
# Wrong - breaks on different installations
@/home/user/.claude/plugins/my-plugin/config.json
# Correct - works everywhere
@${CLAUDE_PLUGIN_ROOT}/config.json
```
---
For detailed plugin-specific features, see `references/plugin-features-reference.md`.
For general command development, see main `SKILL.md`.

View File

@@ -0,0 +1,504 @@
# Simple Command Examples
Basic slash command patterns for common use cases.
**Important:** All examples below are written as instructions FOR Claude (agent consumption), not messages TO users. Commands tell Claude what to do, not tell users what will happen.
## Example 1: Code Review Command
**File:** `.claude/commands/review.md`
```markdown
---
description: Review code for quality and issues
allowed-tools: Read, Bash(git:*)
---
Review the code in this repository for:
1. **Code Quality:**
- Readability and maintainability
- Consistent style and formatting
- Appropriate abstraction levels
2. **Potential Issues:**
- Logic errors or bugs
- Edge cases not handled
- Performance concerns
3. **Best Practices:**
- Design patterns used correctly
- Error handling present
- Documentation adequate
Provide specific feedback with file and line references.
```
**Usage:**
```
> /review
```
---
## Example 2: Security Review Command
**File:** `.claude/commands/security-review.md`
```markdown
---
description: Review code for security vulnerabilities
allowed-tools: Read, Grep
model: sonnet
---
Perform comprehensive security review checking for:
**Common Vulnerabilities:**
- SQL injection risks
- Cross-site scripting (XSS)
- Authentication/authorization issues
- Insecure data handling
- Hardcoded secrets or credentials
**Security Best Practices:**
- Input validation present
- Output encoding correct
- Secure defaults used
- Error messages safe
- Logging appropriate (no sensitive data)
For each issue found:
- File and line number
- Severity (Critical/High/Medium/Low)
- Description of vulnerability
- Recommended fix
Prioritize issues by severity.
```
**Usage:**
```
> /security-review
```
---
## Example 3: Test Command with File Argument
**File:** `.claude/commands/test-file.md`
```markdown
---
description: Run tests for specific file
argument-hint: [test-file]
allowed-tools: Bash(npm:*), Bash(jest:*)
---
Run tests for $1:
Test execution: !`npm test $1`
Analyze results:
- Tests passed/failed
- Code coverage
- Performance issues
- Flaky tests
If failures found, suggest fixes based on error messages.
```
**Usage:**
```
> /test-file src/utils/helpers.test.ts
```
---
## Example 4: Documentation Generator
**File:** `.claude/commands/document.md`
```markdown
---
description: Generate documentation for file
argument-hint: [source-file]
---
Generate comprehensive documentation for @$1
Include:
**Overview:**
- Purpose and responsibility
- Main functionality
- Dependencies
**API Documentation:**
- Function/method signatures
- Parameter descriptions with types
- Return values with types
- Exceptions/errors thrown
**Usage Examples:**
- Basic usage
- Common patterns
- Edge cases
**Implementation Notes:**
- Algorithm complexity
- Performance considerations
- Known limitations
Format as Markdown suitable for project documentation.
```
**Usage:**
```
> /document src/api/users.ts
```
---
## Example 5: Git Status Summary
**File:** `.claude/commands/git-status.md`
```markdown
---
description: Summarize Git repository status
allowed-tools: Bash(git:*)
---
Repository Status Summary:
**Current Branch:** !`git branch --show-current`
**Status:** !`git status --short`
**Recent Commits:** !`git log --oneline -5`
**Remote Status:** !`git fetch && git status -sb`
Provide:
- Summary of changes
- Suggested next actions
- Any warnings or issues
```
**Usage:**
```
> /git-status
```
---
## Example 6: Deployment Command
**File:** `.claude/commands/deploy.md`
```markdown
---
description: Deploy to specified environment
argument-hint: [environment] [version]
allowed-tools: Bash(kubectl:*), Read
---
Deploy to $1 environment using version $2
**Pre-deployment Checks:**
1. Verify $1 configuration exists
2. Check version $2 is valid
3. Verify cluster accessibility: !`kubectl cluster-info`
**Deployment Steps:**
1. Update deployment manifest with version $2
2. Apply configuration to $1
3. Monitor rollout status
4. Verify pod health
5. Run smoke tests
**Rollback Plan:**
Document current version for rollback if issues occur.
Proceed with deployment? (yes/no)
```
**Usage:**
```
> /deploy staging v1.2.3
```
---
## Example 7: Comparison Command
**File:** `.claude/commands/compare-files.md`
```markdown
---
description: Compare two files
argument-hint: [file1] [file2]
---
Compare @$1 with @$2
**Analysis:**
1. **Differences:**
- Lines added
- Lines removed
- Lines modified
2. **Functional Changes:**
- Breaking changes
- New features
- Bug fixes
- Refactoring
3. **Impact:**
- Affected components
- Required updates elsewhere
- Migration requirements
4. **Recommendations:**
- Code review focus areas
- Testing requirements
- Documentation updates needed
Present as structured comparison report.
```
**Usage:**
```
> /compare-files src/old-api.ts src/new-api.ts
```
---
## Example 8: Quick Fix Command
**File:** `.claude/commands/quick-fix.md`
```markdown
---
description: Quick fix for common issues
argument-hint: [issue-description]
model: haiku
---
Quickly fix: $ARGUMENTS
**Approach:**
1. Identify the issue
2. Find relevant code
3. Propose fix
4. Explain solution
Focus on:
- Simple, direct solution
- Minimal changes
- Following existing patterns
- No breaking changes
Provide code changes with file paths and line numbers.
```
**Usage:**
```
> /quick-fix button not responding to clicks
> /quick-fix typo in error message
```
---
## Example 9: Research Command
**File:** `.claude/commands/research.md`
```markdown
---
description: Research best practices for topic
argument-hint: [topic]
model: sonnet
---
Research best practices for: $ARGUMENTS
**Coverage:**
1. **Current State:**
- How we currently handle this
- Existing implementations
2. **Industry Standards:**
- Common patterns
- Recommended approaches
- Tools and libraries
3. **Comparison:**
- Our approach vs standards
- Gaps or improvements needed
- Migration considerations
4. **Recommendations:**
- Concrete action items
- Priority and effort estimates
- Resources for implementation
Provide actionable guidance based on research.
```
**Usage:**
```
> /research error handling in async operations
> /research API authentication patterns
```
---
## Example 10: Explain Code Command
**File:** `.claude/commands/explain.md`
```markdown
---
description: Explain how code works
argument-hint: [file-or-function]
---
Explain @$1 in detail
**Explanation Structure:**
1. **Overview:**
- What it does
- Why it exists
- How it fits in system
2. **Step-by-Step:**
- Line-by-line walkthrough
- Key algorithms or logic
- Important details
3. **Inputs and Outputs:**
- Parameters and types
- Return values
- Side effects
4. **Edge Cases:**
- Error handling
- Special cases
- Limitations
5. **Usage Examples:**
- How to call it
- Common patterns
- Integration points
Explain at a level appropriate for a junior engineer.
```
**Usage:**
```
> /explain src/utils/cache.ts
> /explain AuthService.login
```
---
## Key Patterns
### Pattern 1: Read-Only Analysis
```markdown
---
allowed-tools: Read, Grep
---
Analyze but don't modify...
```
**Use for:** Code review, documentation, analysis
### Pattern 2: Git Operations
```markdown
---
allowed-tools: Bash(git:*)
---
!`git status`
Analyze and suggest...
```
**Use for:** Repository status, commit analysis
### Pattern 3: Single Argument
```markdown
---
argument-hint: [target]
---
Process $1...
```
**Use for:** File operations, targeted actions
### Pattern 4: Multiple Arguments
```markdown
---
argument-hint: [source] [target] [options]
---
Process $1 to $2 with $3...
```
**Use for:** Workflows, deployments, comparisons
### Pattern 5: Fast Execution
```markdown
---
model: haiku
---
Quick simple task...
```
**Use for:** Simple, repetitive commands
### Pattern 6: File Comparison
```markdown
Compare @$1 with @$2...
```
**Use for:** Diff analysis, migration planning
### Pattern 7: Context Gathering
```markdown
---
allowed-tools: Bash(git:*), Read
---
Context: !`git status`
Files: @file1 @file2
Analyze...
```
**Use for:** Informed decision making
## Tips for Writing Simple Commands
1. **Start basic:** Single responsibility, clear purpose
2. **Add complexity gradually:** Start without frontmatter
3. **Test incrementally:** Verify each feature works
4. **Use descriptive names:** Command name should indicate purpose
5. **Document arguments:** Always use argument-hint
6. **Provide examples:** Show usage in comments
7. **Handle errors:** Consider missing arguments or files

View File

@@ -0,0 +1,722 @@
# Advanced Workflow Patterns
Multi-step command sequences and composition patterns for complex workflows.
## Overview
Advanced workflows combine multiple commands, coordinate state across invocations, and create sophisticated automation sequences. These patterns enable building complex functionality from simple command building blocks.
## Multi-Step Command Patterns
### Sequential Workflow Command
Commands that guide users through multi-step processes:
```markdown
---
description: Complete PR review workflow
argument-hint: [pr-number]
allowed-tools: Bash(gh:*), Read, Grep
---
# PR Review Workflow for #$1
## Step 1: Fetch PR Details
!`gh pr view $1 --json title,body,author,files`
## Step 2: Review Files
Files changed: !`gh pr diff $1 --name-only`
For each file:
- Check code quality
- Verify tests exist
- Review documentation
## Step 3: Run Checks
Test status: !`gh pr checks $1`
Verify:
- All tests passing
- No merge conflicts
- CI/CD successful
## Step 4: Provide Feedback
Summarize:
- Issues found (critical/minor)
- Suggestions for improvement
- Approval recommendation
Would you like to:
1. Approve PR
2. Request changes
3. Leave comments only
Reply with your choice and I'll help complete the action.
```
**Key features:**
- Numbered steps for clarity
- Bash execution for context
- Decision points for user input
- Next action suggestions
### State-Carrying Workflow
Commands that maintain state between invocations:
```markdown
---
description: Initialize deployment workflow
allowed-tools: Write, Bash(git:*)
---
# Initialize Deployment
Creating deployment tracking file...
Current branch: !`git branch --show-current`
Latest commit: !`git log -1 --format=%H`
Deployment state saved to `.claude/deployment-state.local.md`:
\`\`\`markdown
---
initialized: true
branch: $(git branch --show-current)
commit: $(git log -1 --format=%H)
timestamp: $(date -u +%Y-%m-%dT%H:%M:%SZ)
status: initialized
---
# Deployment Tracking
Branch: $(git branch --show-current)
Started: $(date)
Next steps:
1. Run tests: /deploy-test
2. Build: /deploy-build
3. Deploy: /deploy-execute
\`\`\`
State saved. Run `/deploy-test` to continue.
```
**Next command** (`/deploy-test`):
```markdown
---
description: Run deployment tests
allowed-tools: Read, Bash(npm:*)
---
Reading deployment state from `.claude/deployment-state.local.md`...
Running tests: !`npm test`
Updating state to 'tested'...
Tests complete. Run `/deploy-build` to continue.
```
**Pattern benefits:**
- Persistent state across commands
- Clear workflow progression
- Safety checkpoints
- Resume capability
### Conditional Workflow Branching
Commands that adapt based on conditions:
```markdown
---
description: Smart deployment workflow
argument-hint: [environment]
allowed-tools: Bash(git:*), Bash(npm:*), Read
---
# Deploy to $1
## Pre-flight Checks
Branch: !`git branch --show-current`
Status: !`git status --short`
**Checking conditions:**
1. Branch status:
- If main/master: Require approval
- If feature branch: Warning about target
- If hotfix: Fast-track process
2. Tests:
!`npm test`
- If tests fail: STOP - fix tests first
- If tests pass: Continue
3. Environment:
- If $1 = 'production': Extra validation
- If $1 = 'staging': Standard process
- If $1 = 'dev': Minimal checks
**Workflow decision:**
Based on above, proceeding with: [determined workflow]
[Conditional steps based on environment and status]
Ready to deploy? (yes/no)
```
## Command Composition Patterns
### Command Chaining
Commands designed to work together:
```markdown
---
description: Prepare for code review
---
# Prepare Code Review
Running preparation sequence:
1. Format code: /format-code
2. Run linter: /lint-code
3. Run tests: /test-all
4. Generate coverage: /coverage-report
5. Create review summary: /review-summary
This is a meta-command. After completing each step above,
I'll compile results and prepare comprehensive review materials.
Starting sequence...
```
**Individual commands** are simple:
- `/format-code` - Just formats
- `/lint-code` - Just lints
- `/test-all` - Just tests
**Composition command** orchestrates them.
### Pipeline Pattern
Commands that process output from previous commands:
```markdown
---
description: Analyze test failures
---
# Analyze Test Failures
## Step 1: Get test results
(Run /test-all first if not done)
Reading test output...
## Step 2: Categorize failures
- Flaky tests (random failures)
- Consistent failures
- New failures vs existing
## Step 3: Prioritize
Rank by:
- Impact (critical path vs edge case)
- Frequency (always fails vs sometimes)
- Effort (quick fix vs major work)
## Step 4: Generate fix plan
For each failure:
- Root cause hypothesis
- Suggested fix approach
- Estimated effort
Would you like me to:
1. Fix highest priority failure
2. Generate detailed fix plans for all
3. Create GitHub issues for each
```
### Parallel Execution Pattern
Commands that coordinate multiple simultaneous operations:
```markdown
---
description: Run comprehensive validation
allowed-tools: Bash(*), Read
---
# Comprehensive Validation
Running validations in parallel...
Starting:
- Code quality checks
- Security scanning
- Dependency audit
- Performance profiling
This will take 2-3 minutes. I'll monitor all processes
and report when complete.
[Poll each process and report progress]
All validations complete. Summary:
- Quality: PASS (0 issues)
- Security: WARN (2 minor issues)
- Dependencies: PASS
- Performance: PASS (baseline met)
Details:
[Collated results from all checks]
```
## Workflow State Management
### Using .local.md Files
Store workflow state in plugin-specific files:
```markdown
.claude/plugin-name-workflow.local.md:
---
workflow: deployment
stage: testing
started: 2025-01-15T10:30:00Z
environment: staging
branch: feature/new-api
commit: abc123def
tests_passed: false
build_complete: false
---
# Deployment Workflow State
Current stage: Testing
Started: 2025-01-15 10:30 UTC
Completed steps:
- ✅ Validation
- ✅ Branch check
- ⏳ Testing (in progress)
Pending steps:
- Build
- Deploy
- Smoke tests
```
**Reading state in commands:**
```markdown
---
description: Continue deployment workflow
allowed-tools: Read, Write
---
Reading workflow state from .claude/plugin-name-workflow.local.md...
Current stage: @.claude/plugin-name-workflow.local.md
[Parse YAML frontmatter to determine next step]
Next action based on state: [determined action]
```
### Workflow Recovery
Handle interrupted workflows:
```markdown
---
description: Resume deployment workflow
allowed-tools: Read
---
# Resume Deployment
Checking for interrupted workflow...
State file: @.claude/plugin-name-workflow.local.md
**Workflow found:**
- Started: [timestamp]
- Environment: [env]
- Last completed: [step]
**Recovery options:**
1. Resume from last step
2. Restart from beginning
3. Abort and clean up
Which would you like? (1/2/3)
```
## Workflow Coordination Patterns
### Cross-Command Communication
Commands that signal each other:
```markdown
---
description: Mark feature complete
allowed-tools: Write
---
# Mark Feature Complete
Writing completion marker...
Creating: .claude/feature-complete.flag
This signals other commands that the feature is ready for:
- Integration testing (/integration-test will auto-detect)
- Documentation generation (/docs-generate will include)
- Release notes (/release-notes will add)
Feature marked complete.
```
**Other commands check for flag:**
```markdown
---
description: Generate release notes
allowed-tools: Read, Bash(git:*)
---
Checking for completed features...
if [ -f .claude/feature-complete.flag ]; then
Feature ready for release notes
fi
[Include in release notes]
```
### Workflow Locking
Prevent concurrent workflow execution:
```markdown
---
description: Start deployment
allowed-tools: Read, Write, Bash
---
# Start Deployment
Checking for active deployments...
if [ -f .claude/deployment.lock ]; then
ERROR: Deployment already in progress
Started: [timestamp from lock file]
Cannot start concurrent deployment.
Wait for completion or run /deployment-abort
Exit.
fi
Creating deployment lock...
Deployment started. Lock created.
[Proceed with deployment]
```
**Lock cleanup:**
```markdown
---
description: Complete deployment
allowed-tools: Write, Bash
---
Deployment complete.
Removing deployment lock...
rm .claude/deployment.lock
Ready for next deployment.
```
## Advanced Argument Handling
### Optional Arguments with Defaults
```markdown
---
description: Deploy with optional version
argument-hint: [environment] [version]
---
Environment: ${1:-staging}
Version: ${2:-latest}
Deploying ${2:-latest} to ${1:-staging}...
Note: Using defaults for missing arguments:
- Environment defaults to 'staging'
- Version defaults to 'latest'
```
### Argument Validation
```markdown
---
description: Deploy to validated environment
argument-hint: [environment]
---
Environment: $1
Validating environment...
valid_envs="dev staging production"
if ! echo "$valid_envs" | grep -w "$1" > /dev/null; then
ERROR: Invalid environment '$1'
Valid options: dev, staging, production
Exit.
fi
Environment validated. Proceeding...
```
### Argument Transformation
```markdown
---
description: Deploy with shorthand
argument-hint: [env-shorthand]
---
Input: $1
Expanding shorthand:
- d/dev → development
- s/stg → staging
- p/prod → production
case "$1" in
d|dev) ENV="development";;
s|stg) ENV="staging";;
p|prod) ENV="production";;
*) ENV="$1";;
esac
Deploying to: $ENV
```
## Error Handling in Workflows
### Graceful Failure
```markdown
---
description: Resilient deployment workflow
---
# Deployment Workflow
Running steps with error handling...
## Step 1: Tests
!`npm test`
if [ $? -ne 0 ]; then
ERROR: Tests failed
Options:
1. Fix tests and retry
2. Skip tests (NOT recommended)
3. Abort deployment
What would you like to do?
[Wait for user input before continuing]
fi
## Step 2: Build
[Continue only if Step 1 succeeded]
```
### Rollback on Failure
```markdown
---
description: Deployment with rollback
---
# Deploy with Rollback
Saving current state for rollback...
Previous version: !`current-version.sh`
Deploying new version...
!`deploy.sh`
if [ $? -ne 0 ]; then
DEPLOYMENT FAILED
Initiating automatic rollback...
!`rollback.sh`
Rolled back to previous version.
Check logs for failure details.
fi
Deployment complete.
```
### Checkpoint Recovery
```markdown
---
description: Workflow with checkpoints
---
# Multi-Stage Deployment
## Checkpoint 1: Validation
!`validate.sh`
echo "checkpoint:validation" >> .claude/deployment-checkpoints.log
## Checkpoint 2: Build
!`build.sh`
echo "checkpoint:build" >> .claude/deployment-checkpoints.log
## Checkpoint 3: Deploy
!`deploy.sh`
echo "checkpoint:deploy" >> .claude/deployment-checkpoints.log
If any step fails, resume with:
/deployment-resume [last-successful-checkpoint]
```
## Best Practices
### Workflow Design
1. **Clear progression**: Number steps, show current position
2. **Explicit state**: Don't rely on implicit state
3. **User control**: Provide decision points
4. **Error recovery**: Handle failures gracefully
5. **Progress indication**: Show what's done, what's pending
### Command Composition
1. **Single responsibility**: Each command does one thing well
2. **Composable design**: Commands work together easily
3. **Standard interfaces**: Consistent input/output formats
4. **Loose coupling**: Commands don't depend on each other's internals
### State Management
1. **Persistent state**: Use .local.md files
2. **Atomic updates**: Write complete state files atomically
3. **State validation**: Check state file format/completeness
4. **Cleanup**: Remove stale state files
5. **Documentation**: Document state file formats
### Error Handling
1. **Fail fast**: Detect errors early
2. **Clear messages**: Explain what went wrong
3. **Recovery options**: Provide clear next steps
4. **State preservation**: Keep state for recovery
5. **Rollback capability**: Support undoing changes
## Example: Complete Deployment Workflow
### Initialize Command
```markdown
---
description: Initialize deployment
argument-hint: [environment]
allowed-tools: Write, Bash(git:*)
---
# Initialize Deployment to $1
Creating workflow state...
\`\`\`yaml
---
workflow: deployment
environment: $1
branch: !`git branch --show-current`
commit: !`git rev-parse HEAD`
stage: initialized
timestamp: !`date -u +%Y-%m-%dT%H:%M:%SZ`
---
\`\`\`
Written to .claude/deployment-state.local.md
Next: Run /deployment-validate
```
### Validation Command
```markdown
---
description: Validate deployment
allowed-tools: Read, Bash
---
Reading state: @.claude/deployment-state.local.md
Running validation...
- Branch check: PASS
- Tests: PASS
- Build: PASS
Updating state to 'validated'...
Next: Run /deployment-execute
```
### Execution Command
```markdown
---
description: Execute deployment
allowed-tools: Read, Bash, Write
---
Reading state: @.claude/deployment-state.local.md
Executing deployment to [environment]...
!`deploy.sh [environment]`
Deployment complete.
Updating state to 'completed'...
Cleanup: /deployment-cleanup
```
### Cleanup Command
```markdown
---
description: Clean up deployment
allowed-tools: Bash
---
Removing deployment state...
rm .claude/deployment-state.local.md
Deployment workflow complete.
```
This complete workflow demonstrates state management, sequential execution, error handling, and clean separation of concerns across multiple commands.

View File

@@ -0,0 +1,739 @@
# Command Documentation Patterns
Strategies for creating self-documenting, maintainable commands with excellent user experience.
## Overview
Well-documented commands are easier to use, maintain, and distribute. Documentation should be embedded in the command itself, making it immediately accessible to users and maintainers.
## Self-Documenting Command Structure
### Complete Command Template
```markdown
---
description: Clear, actionable description under 60 chars
argument-hint: [arg1] [arg2] [optional-arg]
allowed-tools: Read, Bash(git:*)
model: sonnet
---
<!--
COMMAND: command-name
VERSION: 1.0.0
AUTHOR: Team Name
LAST UPDATED: 2025-01-15
PURPOSE:
Detailed explanation of what this command does and why it exists.
USAGE:
/command-name arg1 arg2
ARGUMENTS:
arg1: Description of first argument (required)
arg2: Description of second argument (optional, defaults to X)
EXAMPLES:
/command-name feature-branch main
→ Compares feature-branch with main
/command-name my-branch
→ Compares my-branch with current branch
REQUIREMENTS:
- Git repository
- Branch must exist
- Permissions to read repository
RELATED COMMANDS:
/other-command - Related functionality
/another-command - Alternative approach
TROUBLESHOOTING:
- If branch not found: Check branch name spelling
- If permission denied: Check repository access
CHANGELOG:
v1.0.0 (2025-01-15): Initial release
v0.9.0 (2025-01-10): Beta version
-->
# Command Implementation
[Command prompt content here...]
[Explain what will happen...]
[Guide user through steps...]
[Provide clear output...]
```
### Documentation Comment Sections
**PURPOSE**: Why the command exists
- Problem it solves
- Use cases
- When to use vs when not to use
**USAGE**: Basic syntax
- Command invocation pattern
- Required vs optional arguments
- Default values
**ARGUMENTS**: Detailed argument documentation
- Each argument described
- Type information
- Valid values/ranges
- Defaults
**EXAMPLES**: Concrete usage examples
- Common use cases
- Edge cases
- Expected outputs
**REQUIREMENTS**: Prerequisites
- Dependencies
- Permissions
- Environmental setup
**RELATED COMMANDS**: Connections
- Similar commands
- Complementary commands
- Alternative approaches
**TROUBLESHOOTING**: Common issues
- Known problems
- Solutions
- Workarounds
**CHANGELOG**: Version history
- What changed when
- Breaking changes highlighted
- Migration guidance
## In-Line Documentation Patterns
### Commented Sections
```markdown
---
description: Complex multi-step command
---
<!-- SECTION 1: VALIDATION -->
<!-- This section checks prerequisites before proceeding -->
Checking prerequisites...
- Git repository: !`git rev-parse --git-dir 2>/dev/null`
- Branch exists: [validation logic]
<!-- SECTION 2: ANALYSIS -->
<!-- Analyzes the differences between branches -->
Analyzing differences between $1 and $2...
[Analysis logic...]
<!-- SECTION 3: RECOMMENDATIONS -->
<!-- Provides actionable recommendations -->
Based on analysis, recommend:
[Recommendations...]
<!-- END: Next steps for user -->
```
### Inline Explanations
```markdown
---
description: Deployment command with inline docs
---
# Deploy to $1
## Pre-flight Checks
<!-- We check branch status to prevent deploying from wrong branch -->
Current branch: !`git branch --show-current`
<!-- Production deploys must come from main/master -->
if [ "$1" = "production" ] && [ "$(git branch --show-current)" != "main" ]; then
⚠️ WARNING: Not on main branch for production deploy
This is unusual. Confirm this is intentional.
fi
<!-- Test status ensures we don't deploy broken code -->
Running tests: !`npm test`
✓ All checks passed
## Deployment
<!-- Actual deployment happens here -->
<!-- Uses blue-green strategy for zero-downtime -->
Deploying to $1 environment...
[Deployment steps...]
<!-- Post-deployment verification -->
Verifying deployment health...
[Health checks...]
Deployment complete!
## Next Steps
<!-- Guide user on what to do after deployment -->
1. Monitor logs: /logs $1
2. Run smoke tests: /smoke-test $1
3. Notify team: /notify-deployment $1
```
### Decision Point Documentation
```markdown
---
description: Interactive deployment command
---
# Interactive Deployment
## Configuration Review
Target: $1
Current version: !`cat version.txt`
New version: $2
<!-- DECISION POINT: User confirms configuration -->
<!-- This pause allows user to verify everything is correct -->
<!-- We can't automatically proceed because deployment is risky -->
Review the above configuration.
**Continue with deployment?**
- Reply "yes" to proceed
- Reply "no" to cancel
- Reply "edit" to modify configuration
[Await user input before continuing...]
<!-- After user confirms, we proceed with deployment -->
<!-- All subsequent steps are automated -->
Proceeding with deployment...
```
## Help Text Patterns
### Built-in Help Command
Create a help subcommand for complex commands:
```markdown
---
description: Main command with help
argument-hint: [subcommand] [args]
---
# Command Processor
if [ "$1" = "help" ] || [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
**Command Help**
USAGE:
/command [subcommand] [args]
SUBCOMMANDS:
init [name] Initialize new configuration
deploy [env] Deploy to environment
status Show current status
rollback Rollback last deployment
help Show this help
EXAMPLES:
/command init my-project
/command deploy staging
/command status
/command rollback
For detailed help on a subcommand:
/command [subcommand] --help
Exit.
fi
[Regular command processing...]
```
### Contextual Help
Provide help based on context:
```markdown
---
description: Context-aware command
argument-hint: [operation] [target]
---
# Context-Aware Operation
if [ -z "$1" ]; then
**No operation specified**
Available operations:
- analyze: Analyze target for issues
- fix: Apply automatic fixes
- report: Generate detailed report
Usage: /command [operation] [target]
Examples:
/command analyze src/
/command fix src/app.js
/command report
Run /command help for more details.
Exit.
fi
[Command continues if operation provided...]
```
## Error Message Documentation
### Helpful Error Messages
```markdown
---
description: Command with good error messages
---
# Validation Command
if [ -z "$1" ]; then
❌ ERROR: Missing required argument
The 'file-path' argument is required.
USAGE:
/validate [file-path]
EXAMPLE:
/validate src/app.js
Try again with a file path.
Exit.
fi
if [ ! -f "$1" ]; then
❌ ERROR: File not found: $1
The specified file does not exist or is not accessible.
COMMON CAUSES:
1. Typo in file path
2. File was deleted or moved
3. Insufficient permissions
SUGGESTIONS:
- Check spelling: $1
- Verify file exists: ls -la $(dirname "$1")
- Check permissions: ls -l "$1"
Exit.
fi
[Command continues if validation passes...]
```
### Error Recovery Guidance
```markdown
---
description: Command with recovery guidance
---
# Operation Command
Running operation...
!`risky-operation.sh`
if [ $? -ne 0 ]; then
❌ OPERATION FAILED
The operation encountered an error and could not complete.
WHAT HAPPENED:
The risky-operation.sh script returned a non-zero exit code.
WHAT THIS MEANS:
- Changes may be partially applied
- System may be in inconsistent state
- Manual intervention may be needed
RECOVERY STEPS:
1. Check operation logs: cat /tmp/operation.log
2. Verify system state: /check-state
3. If needed, rollback: /rollback-operation
4. Fix underlying issue
5. Retry operation: /retry-operation
NEED HELP?
- Check troubleshooting guide: /help troubleshooting
- Contact support with error code: ERR_OP_FAILED_001
Exit.
fi
```
## Usage Example Documentation
### Embedded Examples
```markdown
---
description: Command with embedded examples
---
# Feature Command
This command performs feature analysis with multiple options.
## Basic Usage
\`\`\`
/feature analyze src/
\`\`\`
Analyzes all files in src/ directory for feature usage.
## Advanced Usage
\`\`\`
/feature analyze src/ --detailed
\`\`\`
Provides detailed analysis including:
- Feature breakdown by file
- Usage patterns
- Optimization suggestions
## Use Cases
**Use Case 1: Quick overview**
\`\`\`
/feature analyze .
\`\`\`
Get high-level feature summary of entire project.
**Use Case 2: Specific directory**
\`\`\`
/feature analyze src/components
\`\`\`
Focus analysis on components directory only.
**Use Case 3: Comparison**
\`\`\`
/feature analyze src/ --compare baseline.json
\`\`\`
Compare current features against baseline.
---
Now processing your request...
[Command implementation...]
```
### Example-Driven Documentation
```markdown
---
description: Example-heavy command
---
# Transformation Command
## What This Does
Transforms data from one format to another.
## Examples First
### Example 1: JSON to YAML
**Input:** `data.json`
\`\`\`json
{"name": "test", "value": 42}
\`\`\`
**Command:** `/transform data.json yaml`
**Output:** `data.yaml`
\`\`\`yaml
name: test
value: 42
\`\`\`
### Example 2: CSV to JSON
**Input:** `data.csv`
\`\`\`csv
name,value
test,42
\`\`\`
**Command:** `/transform data.csv json`
**Output:** `data.json`
\`\`\`json
[{"name": "test", "value": "42"}]
\`\`\`
### Example 3: With Options
**Command:** `/transform data.json yaml --pretty --sort-keys`
**Result:** Formatted YAML with sorted keys
---
## Your Transformation
File: $1
Format: $2
[Perform transformation...]
```
## Maintenance Documentation
### Version and Changelog
```markdown
<!--
VERSION: 2.1.0
LAST UPDATED: 2025-01-15
AUTHOR: DevOps Team
CHANGELOG:
v2.1.0 (2025-01-15):
- Added support for YAML configuration
- Improved error messages
- Fixed bug with special characters in arguments
v2.0.0 (2025-01-01):
- BREAKING: Changed argument order
- BREAKING: Removed deprecated --old-flag
- Added new validation checks
- Migration guide: /migration-v2
v1.5.0 (2024-12-15):
- Added --verbose flag
- Improved performance by 50%
v1.0.0 (2024-12-01):
- Initial stable release
MIGRATION NOTES:
From v1.x to v2.0:
Old: /command arg1 arg2 --old-flag
New: /command arg2 arg1
The --old-flag is removed. Use --new-flag instead.
DEPRECATION WARNINGS:
- The --legacy-mode flag is deprecated as of v2.1.0
- Will be removed in v3.0.0 (estimated 2025-06-01)
- Use --modern-mode instead
KNOWN ISSUES:
- #123: Slow performance with large files (workaround: use --stream flag)
- #456: Special characters in Windows (fix planned for v2.2.0)
-->
```
### Maintenance Notes
```markdown
<!--
MAINTENANCE NOTES:
CODE STRUCTURE:
- Lines 1-50: Argument parsing and validation
- Lines 51-100: Main processing logic
- Lines 101-150: Output formatting
- Lines 151-200: Error handling
DEPENDENCIES:
- Requires git 2.x or later
- Uses jq for JSON processing
- Needs bash 4.0+ for associative arrays
PERFORMANCE:
- Fast path for small inputs (< 1MB)
- Streams large files to avoid memory issues
- Caches results in /tmp for 1 hour
SECURITY CONSIDERATIONS:
- Validates all inputs to prevent injection
- Uses allowed-tools to limit Bash access
- No credentials in command file
TESTING:
- Unit tests: tests/command-test.sh
- Integration tests: tests/integration/
- Manual test checklist: tests/manual-checklist.md
FUTURE IMPROVEMENTS:
- TODO: Add support for TOML format
- TODO: Implement parallel processing
- TODO: Add progress bar for large files
RELATED FILES:
- lib/parser.sh: Shared parsing logic
- lib/formatter.sh: Output formatting
- config/defaults.yml: Default configuration
-->
```
## README Documentation
Commands should have companion README files:
```markdown
# Command Name
Brief description of what the command does.
## Installation
This command is part of the [plugin-name] plugin.
Install with:
\`\`\`
/plugin install plugin-name
\`\`\`
## Usage
Basic usage:
\`\`\`
/command-name [arg1] [arg2]
\`\`\`
## Arguments
- `arg1`: Description (required)
- `arg2`: Description (optional, defaults to X)
## Examples
### Example 1: Basic Usage
\`\`\`
/command-name value1 value2
\`\`\`
Description of what happens.
### Example 2: Advanced Usage
\`\`\`
/command-name value1 --option
\`\`\`
Description of advanced feature.
## Configuration
Optional configuration file: `.claude/command-name.local.md`
\`\`\`markdown
---
default_arg: value
enable_feature: true
---
\`\`\`
## Requirements
- Git 2.x or later
- jq (for JSON processing)
- Node.js 14+ (optional, for advanced features)
## Troubleshooting
### Issue: Command not found
**Solution:** Ensure plugin is installed and enabled.
### Issue: Permission denied
**Solution:** Check file permissions and allowed-tools setting.
## Contributing
Contributions welcome! See [CONTRIBUTING.md](CONTRIBUTING.md).
## License
MIT License - See [LICENSE](LICENSE).
## Support
- Issues: https://github.com/user/plugin/issues
- Docs: https://docs.example.com
- Email: support@example.com
```
## Best Practices
### Documentation Principles
1. **Write for your future self**: Assume you'll forget details
2. **Examples before explanations**: Show, then tell
3. **Progressive disclosure**: Basic info first, details available
4. **Keep it current**: Update docs when code changes
5. **Test your docs**: Verify examples actually work
### Documentation Locations
1. **In command file**: Core usage, examples, inline explanations
2. **README**: Installation, configuration, troubleshooting
3. **Separate docs**: Detailed guides, tutorials, API reference
4. **Comments**: Implementation details for maintainers
### Documentation Style
1. **Clear and concise**: No unnecessary words
2. **Active voice**: "Run the command" not "The command can be run"
3. **Consistent terminology**: Use same terms throughout
4. **Formatted well**: Use headings, lists, code blocks
5. **Accessible**: Assume reader is beginner
### Documentation Maintenance
1. **Version everything**: Track what changed when
2. **Deprecate gracefully**: Warn before removing features
3. **Migration guides**: Help users upgrade
4. **Archive old docs**: Keep old versions accessible
5. **Review regularly**: Ensure docs match reality
## Documentation Checklist
Before releasing a command:
- [ ] Description in frontmatter is clear
- [ ] argument-hint documents all arguments
- [ ] Usage examples in comments
- [ ] Common use cases shown
- [ ] Error messages are helpful
- [ ] Requirements documented
- [ ] Related commands listed
- [ ] Changelog maintained
- [ ] Version number updated
- [ ] README created/updated
- [ ] Examples actually work
- [ ] Troubleshooting section complete
With good documentation, commands become self-service, reducing support burden and improving user experience.

View File

@@ -0,0 +1,463 @@
# Command Frontmatter Reference
Complete reference for YAML frontmatter fields in slash commands.
## Frontmatter Overview
YAML frontmatter is optional metadata at the start of command files:
```markdown
---
description: Brief description
allowed-tools: Read, Write
model: sonnet
argument-hint: [arg1] [arg2]
---
Command prompt content here...
```
All fields are optional. Commands work without any frontmatter.
## Field Specifications
### description
**Type:** String
**Required:** No
**Default:** First line of command prompt
**Max Length:** ~60 characters recommended for `/help` display
**Purpose:** Describes what the command does, shown in `/help` output
**Examples:**
```yaml
description: Review code for security issues
```
```yaml
description: Deploy to staging environment
```
```yaml
description: Generate API documentation
```
**Best practices:**
- Keep under 60 characters for clean display
- Start with verb (Review, Deploy, Generate)
- Be specific about what command does
- Avoid redundant "command" or "slash command"
**Good:**
- ✅ "Review PR for code quality and security"
- ✅ "Deploy application to specified environment"
- ✅ "Generate comprehensive API documentation"
**Bad:**
- ❌ "This command reviews PRs" (unnecessary "This command")
- ❌ "Review" (too vague)
- ❌ "A command that reviews pull requests for code quality, security issues, and best practices" (too long)
### allowed-tools
**Type:** String or Array of strings
**Required:** No
**Default:** Inherits from conversation permissions
**Purpose:** Restrict or specify which tools command can use
**Formats:**
**Single tool:**
```yaml
allowed-tools: Read
```
**Multiple tools (comma-separated):**
```yaml
allowed-tools: Read, Write, Edit
```
**Multiple tools (array):**
```yaml
allowed-tools:
- Read
- Write
- Bash(git:*)
```
**Tool Patterns:**
**Specific tools:**
```yaml
allowed-tools: Read, Grep, Edit
```
**Bash with command filter:**
```yaml
allowed-tools: Bash(git:*) # Only git commands
allowed-tools: Bash(npm:*) # Only npm commands
allowed-tools: Bash(docker:*) # Only docker commands
```
**All tools (not recommended):**
```yaml
allowed-tools: "*"
```
**When to use:**
1. **Security:** Restrict command to safe operations
```yaml
allowed-tools: Read, Grep # Read-only command
```
2. **Clarity:** Document required tools
```yaml
allowed-tools: Bash(git:*), Read
```
3. **Bash execution:** Enable bash command output
```yaml
allowed-tools: Bash(git status:*), Bash(git diff:*)
```
**Best practices:**
- Be as restrictive as possible
- Use command filters for Bash (e.g., `git:*` not `*`)
- Only specify when different from conversation permissions
- Document why specific tools are needed
### model
**Type:** String
**Required:** No
**Default:** Inherits from conversation
**Values:** `sonnet`, `opus`, `haiku`
**Purpose:** Specify which Claude model executes the command
**Examples:**
```yaml
model: haiku # Fast, efficient for simple tasks
```
```yaml
model: sonnet # Balanced performance (default)
```
```yaml
model: opus # Maximum capability for complex tasks
```
**When to use:**
**Use `haiku` for:**
- Simple, formulaic commands
- Fast execution needed
- Low complexity tasks
- Frequent invocations
```yaml
---
description: Format code file
model: haiku
---
```
**Use `sonnet` for:**
- Standard commands (default)
- Balanced speed/quality
- Most common use cases
```yaml
---
description: Review code changes
model: sonnet
---
```
**Use `opus` for:**
- Complex analysis
- Architectural decisions
- Deep code understanding
- Critical tasks
```yaml
---
description: Analyze system architecture
model: opus
---
```
**Best practices:**
- Omit unless specific need
- Use `haiku` for speed when possible
- Reserve `opus` for genuinely complex tasks
- Test with different models to find right balance
### argument-hint
**Type:** String
**Required:** No
**Default:** None
**Purpose:** Document expected arguments for users and autocomplete
**Format:**
```yaml
argument-hint: [arg1] [arg2] [optional-arg]
```
**Examples:**
**Single argument:**
```yaml
argument-hint: [pr-number]
```
**Multiple required arguments:**
```yaml
argument-hint: [environment] [version]
```
**Optional arguments:**
```yaml
argument-hint: [file-path] [options]
```
**Descriptive names:**
```yaml
argument-hint: [source-branch] [target-branch] [commit-message]
```
**Best practices:**
- Use square brackets `[]` for each argument
- Use descriptive names (not `arg1`, `arg2`)
- Indicate optional vs required in description
- Match order to positional arguments in command
- Keep concise but clear
**Examples by pattern:**
**Simple command:**
```yaml
---
description: Fix issue by number
argument-hint: [issue-number]
---
Fix issue #$1...
```
**Multi-argument:**
```yaml
---
description: Deploy to environment
argument-hint: [app-name] [environment] [version]
---
Deploy $1 to $2 using version $3...
```
**With options:**
```yaml
---
description: Run tests with options
argument-hint: [test-pattern] [options]
---
Run tests matching $1 with options: $2
```
### disable-model-invocation
**Type:** Boolean
**Required:** No
**Default:** false
**Purpose:** Prevent SlashCommand tool from programmatically invoking command
**Examples:**
```yaml
disable-model-invocation: true
```
**When to use:**
1. **Manual-only commands:** Commands requiring user judgment
```yaml
---
description: Approve deployment to production
disable-model-invocation: true
---
```
2. **Destructive operations:** Commands with irreversible effects
```yaml
---
description: Delete all test data
disable-model-invocation: true
---
```
3. **Interactive workflows:** Commands needing user input
```yaml
---
description: Walk through setup wizard
disable-model-invocation: true
---
```
**Default behavior (false):**
- Command available to SlashCommand tool
- Claude can invoke programmatically
- Still available for manual invocation
**When true:**
- Command only invokable by user typing `/command`
- Not available to SlashCommand tool
- Safer for sensitive operations
**Best practices:**
- Use sparingly (limits Claude's autonomy)
- Document why in command comments
- Consider if command should exist if always manual
## Complete Examples
### Minimal Command
No frontmatter needed:
```markdown
Review this code for common issues and suggest improvements.
```
### Simple Command
Just description:
```markdown
---
description: Review code for issues
---
Review this code for common issues and suggest improvements.
```
### Standard Command
Description and tools:
```markdown
---
description: Review Git changes
allowed-tools: Bash(git:*), Read
---
Current changes: !`git diff --name-only`
Review each changed file for:
- Code quality
- Potential bugs
- Best practices
```
### Complex Command
All common fields:
```markdown
---
description: Deploy application to environment
argument-hint: [app-name] [environment] [version]
allowed-tools: Bash(kubectl:*), Bash(helm:*), Read
model: sonnet
---
Deploy $1 to $2 environment using version $3
Pre-deployment checks:
- Verify $2 configuration
- Check cluster status: !`kubectl cluster-info`
- Validate version $3 exists
Proceed with deployment following deployment runbook.
```
### Manual-Only Command
Restricted invocation:
```markdown
---
description: Approve production deployment
argument-hint: [deployment-id]
disable-model-invocation: true
allowed-tools: Bash(gh:*)
---
<!--
MANUAL APPROVAL REQUIRED
This command requires human judgment and cannot be automated.
-->
Review deployment $1 for production approval:
Deployment details: !`gh api /deployments/$1`
Verify:
- All tests passed
- Security scan clean
- Stakeholder approval
- Rollback plan ready
Type "APPROVED" to confirm deployment.
```
## Validation
### Common Errors
**Invalid YAML syntax:**
```yaml
---
description: "Missing quote
allowed-tools: Read, Write
model: sonnet
--- # ❌ Opening quote above is never closed
```
**Fix:** Validate YAML syntax
**Incorrect tool specification:**
```yaml
allowed-tools: Bash # ❌ Missing command filter
```
**Fix:** Use `Bash(git:*)` format
**Invalid model name:**
```yaml
model: gpt4 # ❌ Not a valid Claude model
```
**Fix:** Use `sonnet`, `opus`, or `haiku`
### Validation Checklist
Before committing command:
- [ ] YAML syntax valid (no errors)
- [ ] Description under 60 characters
- [ ] allowed-tools uses proper format
- [ ] model is valid value if specified
- [ ] argument-hint matches positional arguments
- [ ] disable-model-invocation used appropriately
## Best Practices Summary
1. **Start minimal:** Add frontmatter only when needed
2. **Document arguments:** Always use argument-hint with arguments
3. **Restrict tools:** Use most restrictive allowed-tools that works
4. **Choose right model:** Use haiku for speed, opus for complexity
5. **Manual-only sparingly:** Only use disable-model-invocation when necessary
6. **Clear descriptions:** Make commands discoverable in `/help`
7. **Test thoroughly:** Verify frontmatter works as expected

View File

@@ -0,0 +1,920 @@
# Interactive Command Patterns
Comprehensive guide to creating commands that gather user feedback and make decisions through the AskUserQuestion tool.
## Overview
Some commands need user input that doesn't work well with simple arguments. For example:
- Choosing between multiple complex options with trade-offs
- Selecting multiple items from a list
- Making decisions that require explanation
- Gathering preferences or configuration interactively
For these cases, use the **AskUserQuestion tool** within command execution rather than relying on command arguments.
## When to Use AskUserQuestion
### Use AskUserQuestion When:
1. **Multiple choice decisions** with explanations needed
2. **Complex options** that require context to choose
3. **Multi-select scenarios** (choosing multiple items)
4. **Preference gathering** for configuration
5. **Interactive workflows** that adapt based on answers
### Use Command Arguments When:
1. **Simple values** (file paths, numbers, names)
2. **Known inputs** user already has
3. **Scriptable workflows** that should be automatable
4. **Fast invocations** where prompting would slow down
## AskUserQuestion Basics
### Tool Parameters
```typescript
{
questions: [
{
question: "Which authentication method should we use?",
header: "Auth method", // Short label (max 12 chars)
multiSelect: false, // true for multiple selection
options: [
{
label: "OAuth 2.0",
description: "Industry standard, supports multiple providers"
},
{
label: "JWT",
description: "Stateless, good for APIs"
},
{
label: "Session",
description: "Traditional, server-side state"
}
]
}
]
}
```
**Key points:**
- Users can always choose "Other" to provide custom input (automatic)
- `multiSelect: true` allows selecting multiple options
- Options should be 2-4 choices (not more)
- Can ask 1-4 questions per tool call
## Command Pattern for User Interaction
### Basic Interactive Command
```markdown
---
description: Interactive setup command
allowed-tools: AskUserQuestion, Write
---
# Interactive Plugin Setup
This command will guide you through configuring the plugin with a series of questions.
## Step 1: Gather Configuration
Use the AskUserQuestion tool to ask:
**Question 1 - Deployment target:**
- header: "Deploy to"
- question: "Which deployment platform will you use?"
- options:
- AWS (Amazon Web Services with ECS/EKS)
- GCP (Google Cloud with GKE)
- Azure (Microsoft Azure with AKS)
- Local (Docker on local machine)
**Question 2 - Environment strategy:**
- header: "Environments"
- question: "How many environments do you need?"
- options:
- Single (Just production)
- Standard (Dev, Staging, Production)
- Complete (Dev, QA, Staging, Production)
**Question 3 - Features to enable:**
- header: "Features"
- question: "Which features do you want to enable?"
- multiSelect: true
- options:
- Auto-scaling (Automatic resource scaling)
- Monitoring (Health checks and metrics)
- CI/CD (Automated deployment pipeline)
- Backups (Automated database backups)
## Step 2: Process Answers
Based on the answers received from AskUserQuestion:
1. Parse the deployment target choice
2. Set up environment-specific configuration
3. Enable selected features
4. Generate configuration files
## Step 3: Generate Configuration
Create `.claude/plugin-name.local.md` with:
\`\`\`yaml
---
deployment_target: [answer from Q1]
environments: [answer from Q2]
features:
auto_scaling: [true if selected in Q3]
monitoring: [true if selected in Q3]
ci_cd: [true if selected in Q3]
backups: [true if selected in Q3]
---
# Plugin Configuration
Generated: [timestamp]
Target: [deployment_target]
Environments: [environments]
\`\`\`
## Step 4: Confirm and Next Steps
Confirm configuration created and guide user on next steps.
```
### Multi-Stage Interactive Workflow
```markdown
---
description: Multi-stage interactive workflow
allowed-tools: AskUserQuestion, Read, Write, Bash
---
# Multi-Stage Deployment Setup
This command walks through deployment setup in stages, adapting based on your answers.
## Stage 1: Basic Configuration
Use AskUserQuestion to ask about deployment basics.
Based on answers, determine which additional questions to ask.
## Stage 2: Advanced Options (Conditional)
If user selected "Advanced" deployment in Stage 1:
Use AskUserQuestion to ask about:
- Load balancing strategy
- Caching configuration
- Security hardening options
If user selected "Simple" deployment:
- Skip advanced questions
- Use sensible defaults
## Stage 3: Confirmation
Show summary of all selections.
Use AskUserQuestion for final confirmation:
- header: "Confirm"
- question: "Does this configuration look correct?"
- options:
- Yes (Proceed with setup)
- No (Start over)
- Modify (Let me adjust specific settings)
If "Modify", ask which specific setting to change.
## Stage 4: Execute Setup
Based on confirmed configuration, execute setup steps.
```
## Interactive Question Design
### Question Structure
**Good questions:**
```markdown
Question: "Which database should we use for this project?"
Header: "Database"
Options:
- PostgreSQL (Relational, ACID compliant, best for complex queries)
- MongoDB (Document store, flexible schema, best for rapid iteration)
- Redis (In-memory, fast, best for caching and sessions)
```
**Poor questions:**
```markdown
Question: "Database?" // Too vague
Header: "DB" // Unclear abbreviation
Options:
- Option 1 // Not descriptive
- Option 2
```
### Option Design Best Practices
**Clear labels:**
- Use 1-5 words
- Specific and descriptive
- No jargon without context
**Helpful descriptions:**
- Explain what the option means
- Mention key benefits or trade-offs
- Help user make informed decision
- Keep to 1-2 sentences
**Appropriate number:**
- 2-4 options per question
- Don't overwhelm with too many choices
- Group related options
- "Other" automatically provided
### Multi-Select Questions
**When to use multiSelect:**
```markdown
Use AskUserQuestion for enabling features:
Question: "Which features do you want to enable?"
Header: "Features"
multiSelect: true // Allow selecting multiple
Options:
- Logging (Detailed operation logs)
- Metrics (Performance monitoring)
- Alerts (Error notifications)
- Backups (Automatic backups)
```
User can select any combination: none, some, or all.
**When NOT to use multiSelect:**
```markdown
Question: "Which authentication method?"
multiSelect: false // Only one auth method makes sense
```
Mutually exclusive choices should not use multiSelect.
## Command Patterns with AskUserQuestion
### Pattern 1: Simple Yes/No Decision
```markdown
---
description: Command with confirmation
allowed-tools: AskUserQuestion, Bash
---
# Destructive Operation
This operation will delete all cached data.
Use AskUserQuestion to confirm:
Question: "This will delete all cached data. Are you sure?"
Header: "Confirm"
Options:
- Yes (Proceed with deletion)
- No (Cancel operation)
If user selects "Yes":
Execute deletion
Report completion
If user selects "No":
Cancel operation
Exit without changes
```
### Pattern 2: Multiple Configuration Questions
```markdown
---
description: Multi-question configuration
allowed-tools: AskUserQuestion, Write
---
# Project Configuration Setup
Gather configuration through multiple questions.
Use AskUserQuestion with multiple questions in one call:
**Question 1:**
- question: "Which programming language?"
- header: "Language"
- options: Python, TypeScript, Go, Rust
**Question 2:**
- question: "Which test framework?"
- header: "Testing"
- options: Jest, PyTest, Go Test, Cargo Test
(Adapt based on language from Q1)
**Question 3:**
- question: "Which CI/CD platform?"
- header: "CI/CD"
- options: GitHub Actions, GitLab CI, CircleCI
**Question 4:**
- question: "Which features do you need?"
- header: "Features"
- multiSelect: true
- options: Linting, Type checking, Code coverage, Security scanning
Process all answers together to generate cohesive configuration.
```
### Pattern 3: Conditional Question Flow
```markdown
---
description: Conditional interactive workflow
allowed-tools: AskUserQuestion, Read, Write
---
# Adaptive Configuration
## Question 1: Deployment Complexity
Use AskUserQuestion:
Question: "How complex is your deployment?"
Header: "Complexity"
Options:
- Simple (Single server, straightforward)
- Standard (Multiple servers, load balancing)
- Complex (Microservices, orchestration)
## Conditional Questions Based on Answer
If answer is "Simple":
- No additional questions
- Use minimal configuration
If answer is "Standard":
- Ask about load balancing strategy
- Ask about scaling policy
If answer is "Complex":
- Ask about orchestration platform (Kubernetes, Docker Swarm)
- Ask about service mesh (Istio, Linkerd, None)
- Ask about monitoring (Prometheus, Datadog, CloudWatch)
- Ask about logging aggregation
## Process Conditional Answers
Generate configuration appropriate for selected complexity level.
```
### Pattern 4: Iterative Collection
```markdown
---
description: Collect multiple items iteratively
allowed-tools: AskUserQuestion, Write
---
# Collect Team Members
We'll collect team member information for the project.
## Question: How many team members?
Use AskUserQuestion:
Question: "How many team members should we set up?"
Header: "Team size"
Options:
- 2 people
- 3 people
- 4 people
- 6 people
## Iterate Through Team Members
For each team member (1 to N based on answer):
Use AskUserQuestion for member details:
Question: "What role for team member [number]?"
Header: "Role"
Options:
- Frontend Developer
- Backend Developer
- DevOps Engineer
- QA Engineer
- Designer
Store each member's information.
## Generate Team Configuration
After collecting all N members, create team configuration file with all members and their roles.
```
### Pattern 5: Dependency Selection
```markdown
---
description: Select dependencies with multi-select
allowed-tools: AskUserQuestion
---
# Configure Project Dependencies
## Question: Required Libraries
Use AskUserQuestion with multiSelect:
Question: "Which libraries does your project need?"
Header: "Dependencies"
multiSelect: true
Options:
- React (UI framework)
- Express (Web server)
- TypeORM (Database ORM)
- Jest (Testing framework)
- Axios (HTTP client)
User can select any combination.
## Process Selections
For each selected library:
- Add to package.json dependencies
- Generate sample configuration
- Create usage examples
- Update documentation
```
## Best Practices for Interactive Commands
### Question Design
1. **Clear and specific**: Question should be unambiguous
2. **Concise header**: Max 12 characters for clean display
3. **Helpful options**: Labels are clear, descriptions explain trade-offs
4. **Appropriate count**: 2-4 options per question, 1-4 questions per call
5. **Logical order**: Questions flow naturally
### Error Handling
```markdown
# Handle AskUserQuestion Responses
After calling AskUserQuestion, verify answers received:
If answers are empty or invalid:
Something went wrong gathering responses.
Please try again or provide configuration manually:
[Show alternative approach]
Exit.
If answers look correct:
Process as expected
```
### Progressive Disclosure
```markdown
# Start Simple, Get Detailed as Needed
## Question 1: Setup Type
Use AskUserQuestion:
Question: "How would you like to set up?"
Header: "Setup type"
Options:
- Quick (Use recommended defaults)
- Custom (Configure all options)
- Guided (Step-by-step with explanations)
If "Quick":
Apply defaults, minimal questions
If "Custom":
Ask all available configuration questions
If "Guided":
Ask questions with extra explanation
Provide recommendations along the way
```
### Multi-Select Guidelines
**Good multi-select use:**
```markdown
Question: "Which features do you want to enable?"
multiSelect: true
Options:
- Logging
- Metrics
- Alerts
- Backups
Reason: User might want any combination
```
**Bad multi-select use:**
```markdown
Question: "Which database engine?"
multiSelect: true // ❌ Should be single-select
Reason: Can only use one database engine
```
## Advanced Patterns
### Validation Loop
```markdown
---
description: Interactive with validation
allowed-tools: AskUserQuestion, Bash
---
# Setup with Validation
## Gather Configuration
Use AskUserQuestion to collect settings.
## Validate Configuration
Check if configuration is valid:
- Required dependencies available?
- Settings compatible with each other?
- No conflicts detected?
If validation fails:
Show validation errors
Use AskUserQuestion to ask:
Question: "Configuration has issues. What would you like to do?"
Header: "Next step"
Options:
- Fix (Adjust settings to resolve issues)
- Override (Proceed despite warnings)
- Cancel (Abort setup)
Based on answer, retry or proceed or exit.
```
### Build Configuration Incrementally
```markdown
---
description: Incremental configuration builder
allowed-tools: AskUserQuestion, Write, Read
---
# Incremental Setup
## Phase 1: Core Settings
Use AskUserQuestion for core settings.
Save to `.claude/config-partial.yml`
## Phase 2: Review Core Settings
Show user the core settings:
Based on these core settings, you need to configure:
- [Setting A] (because you chose [X])
- [Setting B] (because you chose [Y])
Ready to continue?
## Phase 3: Detailed Settings
Use AskUserQuestion for settings based on Phase 1 answers.
Merge with core settings.
## Phase 4: Final Review
Present complete configuration.
Use AskUserQuestion for confirmation:
Question: "Is this configuration correct?"
Options:
- Yes (Save and apply)
- No (Start over)
- Modify (Edit specific settings)
```
### Dynamic Options Based on Context
```markdown
---
description: Context-aware questions
allowed-tools: AskUserQuestion, Bash, Read
---
# Context-Aware Setup
## Detect Current State
Check existing configuration:
- Current language: !`detect-language.sh`
- Existing frameworks: !`detect-frameworks.sh`
- Available tools: !`check-tools.sh`
## Ask Context-Appropriate Questions
Based on detected language, ask relevant questions.
If language is TypeScript:
Use AskUserQuestion:
Question: "Which TypeScript features should we enable?"
Options:
- Strict Mode (Maximum type safety)
- Decorators (Experimental decorator support)
- Path Mapping (Module path aliases)
If language is Python:
Use AskUserQuestion:
Question: "Which Python tools should we configure?"
Options:
- Type Hints (mypy for type checking)
- Black (Code formatting)
- Pylint (Linting and style)
Questions adapt to project context.
```
## Real-World Example: Multi-Agent Swarm Launch
**From multi-agent-swarm plugin:**
```markdown
---
description: Launch multi-agent swarm
allowed-tools: AskUserQuestion, Read, Write, Bash
---
# Launch Multi-Agent Swarm
## Interactive Mode (No Task List Provided)
If user didn't provide task list file, help create one interactively.
### Question 1: Agent Count
Use AskUserQuestion:
Question: "How many agents should we launch?"
Header: "Agent count"
Options:
- 2 agents (Best for simple projects)
- 3 agents (Good for medium projects)
- 4 agents (Standard team size)
- 6 agents (Large projects)
- 8 agents (Complex multi-component projects)
### Question 2: Task Definition Approach
Use AskUserQuestion:
Question: "How would you like to define tasks?"
Header: "Task setup"
Options:
- File (I have a task list file ready)
- Guided (Help me create tasks interactively)
- Custom (Other approach)
If "File":
Ask for file path
Validate file exists and has correct format
If "Guided":
Enter iterative task creation mode (see below)
### Question 3: Coordination Mode
Use AskUserQuestion:
Question: "How should agents coordinate?"
Header: "Coordination"
Options:
- Team Leader (One agent coordinates others)
- Collaborative (Agents coordinate as peers)
- Autonomous (Independent work, minimal coordination)
### Iterative Task Creation (If "Guided" Selected)
For each agent (1 to N from Question 1):
**Question A: Agent Name**
Question: "What should we call agent [number]?"
Header: "Agent name"
Options:
- auth-agent
- api-agent
- ui-agent
- db-agent
(Provide relevant suggestions based on common patterns)
**Question B: Task Type**
Question: "What task for [agent-name]?"
Header: "Task type"
Options:
- Authentication (User auth, JWT, OAuth)
- API Endpoints (REST/GraphQL APIs)
- UI Components (Frontend components)
- Database (Schema, migrations, queries)
- Testing (Test suites and coverage)
- Documentation (Docs, README, guides)
**Question C: Dependencies**
Question: "What does [agent-name] depend on?"
Header: "Dependencies"
multiSelect: true
Options:
- [List of previously defined agents]
- No dependencies
**Question D: Base Branch**
Question: "Which base branch for PR?"
Header: "PR base"
Options:
- main
- staging
- develop
Store all task information for each agent.
### Generate Task List File
After collecting all agent task details:
1. Ask for project name
2. Generate task list in proper format
3. Save to `.daisy/swarm/tasks.md`
4. Show user the file path
5. Proceed with launch using generated task list
```
## Best Practices
### Question Writing
1. **Be specific**: "Which database?" not "Choose option?"
2. **Explain trade-offs**: Describe pros/cons in option descriptions
3. **Provide context**: Question text should stand alone
4. **Guide decisions**: Help user make informed choice
5. **Keep concise**: Header max 12 chars, descriptions 1-2 sentences
### Option Design
1. **Meaningful labels**: Specific, clear names
2. **Informative descriptions**: Explain what each option does
3. **Show trade-offs**: Help user understand implications
4. **Consistent detail**: All options equally explained
5. **2-4 options**: Not too few, not too many
### Flow Design
1. **Logical order**: Questions flow naturally
2. **Build on previous**: Later questions use earlier answers
3. **Minimize questions**: Ask only what's needed
4. **Group related**: Ask related questions together
5. **Show progress**: Indicate where the user is in the flow
### User Experience
1. **Set expectations**: Tell user what to expect
2. **Explain why**: Help user understand purpose
3. **Provide defaults**: Suggest recommended options
4. **Allow escape**: Let user cancel or restart
5. **Confirm actions**: Summarize before executing
## Common Patterns
### Pattern: Feature Selection
```markdown
Use AskUserQuestion:
Question: "Which features do you need?"
Header: "Features"
multiSelect: true
Options:
- Authentication
- Authorization
- Rate Limiting
- Caching
```
### Pattern: Environment Configuration
```markdown
Use AskUserQuestion:
Question: "Which environment is this?"
Header: "Environment"
Options:
- Development (Local development)
- Staging (Pre-production testing)
- Production (Live environment)
```
### Pattern: Priority Selection
```markdown
Use AskUserQuestion:
Question: "What's the priority for this task?"
Header: "Priority"
Options:
- Critical (Must be done immediately)
- High (Important, do soon)
- Medium (Standard priority)
- Low (Nice to have)
```
### Pattern: Scope Selection
```markdown
Use AskUserQuestion:
Question: "What scope should we analyze?"
Header: "Scope"
Options:
- Current file (Just this file)
- Current directory (All files in directory)
- Entire project (Full codebase scan)
```
## Combining Arguments and Questions
### Use Both Appropriately
**Arguments for known values:**
```markdown
---
argument-hint: [project-name]
allowed-tools: AskUserQuestion, Write
---
Setup for project: $1
Now gather additional configuration...
Use AskUserQuestion for options that require explanation.
```
**Questions for complex choices:**
```markdown
Project name from argument: $1
Now use AskUserQuestion to choose:
- Architecture pattern
- Technology stack
- Deployment strategy
These require explanation, so questions work better than arguments.
```
## Troubleshooting
**Questions not appearing:**
- Verify AskUserQuestion in allowed-tools
- Check question format is correct
- Ensure options array has 2-4 items
**User can't make selection:**
- Check option labels are clear
- Verify descriptions are helpful
- Consider if too many options
- Ensure multiSelect setting is correct
**Flow feels confusing:**
- Reduce number of questions
- Group related questions
- Add explanation between stages
- Show progress through workflow
With AskUserQuestion, commands become interactive wizards that guide users through complex decisions while maintaining the clarity that simple arguments provide for straightforward inputs.

View File

@@ -0,0 +1,904 @@
# Marketplace Considerations for Commands
Guidelines for creating commands designed for distribution and marketplace success.
## Overview
Commands distributed through marketplaces need additional consideration beyond personal use commands. They must work across environments, handle diverse use cases, and provide excellent user experience for unknown users.
## Design for Distribution
### Universal Compatibility
**Cross-platform considerations:**
```markdown
---
description: Cross-platform command
allowed-tools: Bash(*)
---
# Platform-Aware Command
Detecting platform...
case "$(uname)" in
Darwin*) PLATFORM="macOS" ;;
Linux*) PLATFORM="Linux" ;;
MINGW*|MSYS*|CYGWIN*) PLATFORM="Windows" ;;
*) PLATFORM="Unknown" ;;
esac
Platform: $PLATFORM
<!-- Adjust behavior based on platform -->
if [ "$PLATFORM" = "Windows" ]; then
# Windows-specific handling
PATH_SEP="\\"
NULL_DEVICE="NUL"
else
# Unix-like handling
PATH_SEP="/"
NULL_DEVICE="/dev/null"
fi
[Platform-appropriate implementation...]
```
**Avoid platform-specific commands:**
```markdown
<!-- BAD: macOS-specific -->
!`pbcopy < file.txt`
<!-- GOOD: Platform detection -->
if command -v pbcopy > /dev/null; then
pbcopy < file.txt
elif command -v xclip > /dev/null; then
xclip -selection clipboard < file.txt
elif command -v clip.exe > /dev/null; then
cat file.txt | clip.exe
else
echo "Clipboard not available on this platform"
fi
```
### Minimal Dependencies
**Check for required tools:**
```markdown
---
description: Dependency-aware command
allowed-tools: Bash(*)
---
# Check Dependencies
Required tools:
- git
- jq
- node
Checking availability...
MISSING_DEPS=""
for tool in git jq node; do
if ! command -v $tool > /dev/null; then
MISSING_DEPS="$MISSING_DEPS $tool"
fi
done
if [ -n "$MISSING_DEPS" ]; then
❌ ERROR: Missing required dependencies:$MISSING_DEPS
INSTALLATION:
- git: https://git-scm.com/downloads
- jq: https://stedolan.github.io/jq/download/
- node: https://nodejs.org/
Install missing tools and try again.
Exit.
fi
✓ All dependencies available
[Continue with command...]
```
**Document optional dependencies:**
```markdown
<!--
DEPENDENCIES:
Required:
- git 2.0+: Version control
- jq 1.6+: JSON processing
Optional:
- gh: GitHub CLI (for PR operations)
- docker: Container operations (for containerized tests)
Feature availability depends on installed tools.
-->
```
### Graceful Degradation
**Handle missing features:**
```markdown
---
description: Feature-aware command
---
# Feature Detection
Detecting available features...
FEATURES=""
if command -v gh > /dev/null; then
FEATURES="$FEATURES github"
fi
if command -v docker > /dev/null; then
FEATURES="$FEATURES docker"
fi
Available features: $FEATURES
if echo "$FEATURES" | grep -q "github"; then
# Full functionality with GitHub integration
echo "✓ GitHub integration available"
else
# Reduced functionality without GitHub
echo "⚠ Limited functionality: GitHub CLI not installed"
echo " Install 'gh' for full features"
fi
[Adapt behavior based on available features...]
```
## User Experience for Unknown Users
### Clear Onboarding
**First-run experience:**
```markdown
---
description: Command with onboarding
allowed-tools: Read, Write
---
# First Run Check
if [ ! -f ".claude/command-initialized" ]; then
**Welcome to Command Name!**
This appears to be your first time using this command.
WHAT THIS COMMAND DOES:
[Brief explanation of purpose and benefits]
QUICK START:
1. Basic usage: /command [arg]
2. For help: /command help
3. Examples: /command examples
SETUP:
No additional setup required. You're ready to go!
✓ Initialization complete
[Create initialization marker]
Ready to proceed with your request...
fi
[Normal command execution...]
```
**Progressive feature discovery:**
```markdown
---
description: Command with tips
---
# Command Execution
[Main functionality...]
---
💡 TIP: Did you know?
You can speed up this command with the --fast flag:
/command --fast [args]
For more tips: /command tips
```
### Comprehensive Error Handling
**Anticipate user mistakes:**
```markdown
---
description: Forgiving command
---
# User Input Handling
Argument: "$1"
<!-- Check for common typos -->
if [ "$1" = "hlep" ] || [ "$1" = "hepl" ]; then
Did you mean: help?
Showing help instead...
[Display help]
Exit.
fi
<!-- Suggest similar commands if not found -->
if [ "$1" != "valid-option1" ] && [ "$1" != "valid-option2" ]; then
❌ Unknown option: $1
Did you mean:
- valid-option1 (most similar)
- valid-option2
For all options: /command help
Exit.
fi
[Command continues...]
```
**Helpful diagnostics:**
```markdown
---
description: Diagnostic command
---
# Operation Failed
The operation could not complete.
**Diagnostic Information:**
Environment:
- Platform: $(uname)
- Shell: $SHELL
- Working directory: $(pwd)
- Command: /command $@
Checking common issues:
- Git repository: $(git rev-parse --git-dir 2>&1)
- Write permissions: $(test -w . && echo "OK" || echo "DENIED")
- Required files: $(test -f config.yml && echo "Found" || echo "Missing")
This information helps debug the issue.
For support, include the above diagnostics.
```
## Distribution Best Practices
### Namespace Awareness
**Avoid name collisions:**
```markdown
---
description: Namespaced command
---
<!--
COMMAND NAME: plugin-name-command
This command is namespaced with the plugin name to avoid
conflicts with commands from other plugins.
Alternative naming approaches:
- Use plugin prefix: /plugin-command
- Use category: /category-command
- Use verb-noun: /verb-noun
Chosen approach: plugin-name prefix
Reasoning: Clearest ownership, least likely to conflict
-->
# Plugin Name Command
[Implementation...]
```
**Document naming rationale:**
```markdown
<!--
NAMING DECISION:
Command name: /deploy-app
Alternatives considered:
- /deploy: Too generic, likely conflicts
- /app-deploy: Less intuitive ordering
- /my-plugin-deploy: Too verbose
Final choice balances:
- Discoverability (clear purpose)
- Brevity (easy to type)
- Uniqueness (unlikely conflicts)
-->
```
### Configurability
**User preferences:**
```markdown
---
description: Configurable command
allowed-tools: Read
---
# Load User Configuration
Default configuration:
- verbose: false
- color: true
- max_results: 10
Checking for user config: .claude/plugin-name.local.md
if [ -f ".claude/plugin-name.local.md" ]; then
# Parse YAML frontmatter for settings
VERBOSE=$(grep "^verbose:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ')
COLOR=$(grep "^color:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ')
MAX_RESULTS=$(grep "^max_results:" .claude/plugin-name.local.md | cut -d: -f2 | tr -d ' ')
echo "✓ Using user configuration"
else
echo "Using default configuration"
echo "Create .claude/plugin-name.local.md to customize"
fi
[Use configuration in command...]
```
**Sensible defaults:**
```markdown
---
description: Command with smart defaults
---
# Smart Defaults
Configuration:
- Format: ${FORMAT:-json} # Defaults to json
- Output: ${OUTPUT:-stdout} # Defaults to stdout
- Verbose: ${VERBOSE:-false} # Defaults to false
These defaults work for 80% of use cases.
Override with arguments:
/command --format yaml --output file.txt --verbose
Or set in .claude/plugin-name.local.md:
\`\`\`yaml
---
format: yaml
output: custom.txt
verbose: true
---
\`\`\`
```
### Version Compatibility
**Version checking:**
```markdown
---
description: Version-aware command
---
<!--
COMMAND VERSION: 2.1.0
COMPATIBILITY:
- Requires plugin version: >= 2.0.0
- Breaking changes from v1.x documented in MIGRATION.md
VERSION HISTORY:
- v2.1.0: Added --new-feature flag
- v2.0.0: BREAKING: Changed argument order
- v1.0.0: Initial release
-->
# Version Check
Command version: 2.1.0
Plugin version: [detect from plugin.json]
if [ "$(printf '%s\n' "$PLUGIN_VERSION" "2.0.0" | sort -V | head -n1)" != "2.0.0" ]; then
❌ ERROR: Incompatible plugin version
This command requires plugin version >= 2.0.0
Current version: $PLUGIN_VERSION
Update plugin:
/plugin update plugin-name
Exit.
fi
✓ Version compatible
[Command continues...]
```
**Deprecation warnings:**
```markdown
---
description: Command with deprecation warnings
---
# Deprecation Check
if [ "$1" = "--old-flag" ]; then
⚠️ DEPRECATION WARNING
The --old-flag option is deprecated as of v2.0.0
It will be removed in v3.0.0 (est. June 2025)
Use instead: --new-flag
Example:
Old: /command --old-flag value
New: /command --new-flag value
See migration guide: /command migrate
Continuing with deprecated behavior for now...
fi
[Handle both old and new flags during deprecation period...]
```
## Marketplace Presentation
### Command Discovery
**Descriptive naming:**
```markdown
---
description: Review pull request with security and quality checks
---
<!-- GOOD: Descriptive name and description -->
```
```markdown
---
description: Do the thing
---
<!-- BAD: Vague description -->
```
**Searchable keywords:**
```markdown
<!--
KEYWORDS: security, code-review, quality, validation, audit
These keywords help users discover this command when searching
for related functionality in the marketplace.
-->
```
### Showcase Examples
**Compelling demonstrations:**
```markdown
---
description: Advanced code analysis command
---
# Code Analysis Command
This command performs deep code analysis with actionable insights.
## Demo: Quick Security Audit
Try it now:
\`\`\`
/analyze-code src/ --security
\`\`\`
**What you'll get:**
- Security vulnerability detection
- Code quality metrics
- Performance bottleneck identification
- Actionable recommendations
**Sample output:**
\`\`\`
Security Analysis Results
=========================
🔴 Critical (2):
- SQL injection risk in users.js:45
- XSS vulnerability in display.js:23
🟡 Warnings (5):
- Unvalidated input in api.js:67
...
Recommendations:
1. Fix critical issues immediately
2. Review warnings before next release
3. Run /analyze-code --fix for auto-fixes
\`\`\`
---
Ready to analyze your code...
[Command implementation...]
```
### User Reviews and Feedback
**Feedback mechanism:**
```markdown
---
description: Command with feedback
---
# Command Complete
[Command results...]
---
**How was your experience?**
This helps improve the command for everyone.
Rate this command:
- 👍 Helpful
- 👎 Not helpful
- 🐛 Found a bug
- 💡 Have a suggestion
Reply with an emoji or:
- /command feedback
Your feedback matters!
```
**Usage analytics preparation:**
```markdown
<!--
ANALYTICS NOTES:
Track for improvement:
- Most common arguments
- Failure rates
- Average execution time
- User satisfaction scores
Privacy-preserving:
- No personally identifiable information
- Aggregate statistics only
- User opt-out respected
-->
```
## Quality Standards
### Professional Polish
**Consistent branding:**
```markdown
---
description: Branded command
---
# ✨ Command Name
Part of the [Plugin Name] suite
[Command functionality...]
---
**Need Help?**
- Documentation: https://docs.example.com
- Support: support@example.com
- Community: https://community.example.com
Powered by Plugin Name v2.1.0
```
**Attention to detail:**
```markdown
<!-- Details that matter -->
✓ Use proper emoji/symbols consistently
✓ Align output columns neatly
✓ Format numbers with thousands separators
✓ Use color/formatting appropriately
✓ Provide progress indicators
✓ Show estimated time remaining
✓ Confirm successful operations
```
### Reliability
**Idempotency:**
```markdown
---
description: Idempotent command
---
# Safe Repeated Execution
Checking if operation already completed...
if [ -f ".claude/operation-completed.flag" ]; then
Operation already completed
Completed at: $(cat .claude/operation-completed.flag)
To re-run:
1. Remove flag: rm .claude/operation-completed.flag
2. Run command again
Otherwise, no action needed.
Exit.
fi
Performing operation...
[Safe, repeatable operation...]
Marking complete...
echo "$(date)" > .claude/operation-completed.flag
```
**Atomic operations:**
```markdown
---
description: Atomic command
---
# Atomic Operation
This operation is atomic - either fully succeeds or fully fails.
Creating temporary workspace...
TEMP_DIR=$(mktemp -d)
Performing changes in isolated environment...
[Make changes in $TEMP_DIR]
if [ $? -eq 0 ]; then
✓ Changes validated
Applying changes atomically...
mv $TEMP_DIR/* ./target/
✓ Operation complete
else
❌ Changes failed validation
Rolling back...
rm -rf $TEMP_DIR
No changes applied. Safe to retry.
fi
```
## Testing for Distribution
### Pre-Release Checklist
```markdown
<!--
PRE-RELEASE CHECKLIST:
Functionality:
- [ ] Works on macOS
- [ ] Works on Linux
- [ ] Works on Windows (WSL)
- [ ] All arguments tested
- [ ] Error cases handled
- [ ] Edge cases covered
User Experience:
- [ ] Clear description
- [ ] Helpful error messages
- [ ] Examples provided
- [ ] First-run experience good
- [ ] Documentation complete
Distribution:
- [ ] No hardcoded paths
- [ ] Dependencies documented
- [ ] Configuration options clear
- [ ] Version number set
- [ ] Changelog updated
Quality:
- [ ] No TODO comments
- [ ] No debug code
- [ ] Performance acceptable
- [ ] Security reviewed
- [ ] Privacy considered
Support:
- [ ] README complete
- [ ] Troubleshooting guide
- [ ] Support contact provided
- [ ] Feedback mechanism
- [ ] License specified
-->
```
### Beta Testing
**Beta release approach:**
```markdown
---
description: Beta command (v0.9.0)
---
# 🧪 Beta Command
**This is a beta release**
Features may change based on feedback.
BETA STATUS:
- Version: 0.9.0
- Stability: Experimental
- Support: Limited
- Feedback: Encouraged
Known limitations:
- Performance not optimized
- Some edge cases not handled
- Documentation incomplete
Help improve this command:
- Report issues: /command report-issue
- Suggest features: /command suggest
- Join beta testers: /command join-beta
---
[Command implementation...]
---
**Thank you for beta testing!**
Your feedback helps make this command better.
```
## Maintenance and Updates
### Update Strategy
**Versioned commands:**
```markdown
<!--
VERSION STRATEGY:
Major (X.0.0): Breaking changes
- Document all breaking changes
- Provide migration guide
- Support old version briefly
Minor (x.Y.0): New features
- Backward compatible
- Announce new features
- Update examples
Patch (x.y.Z): Bug fixes
- No user-facing changes
- Update changelog
- Security fixes prioritized
Release schedule:
- Patches: As needed
- Minors: Monthly
- Majors: Annually or as needed
-->
```
**Update notifications:**
```markdown
---
description: Update-aware command
---
# Check for Updates
Current version: 2.1.0
Latest version: [check if available]
if [ "$CURRENT_VERSION" != "$LATEST_VERSION" ]; then
📢 UPDATE AVAILABLE
New version: $LATEST_VERSION
Current: $CURRENT_VERSION
What's new:
- Feature improvements
- Bug fixes
- Performance enhancements
Update with:
/plugin update plugin-name
Release notes: https://releases.example.com/v$LATEST_VERSION
fi
[Command continues...]
```
## Best Practices Summary
### Distribution Design
1. **Universal**: Works across platforms and environments
2. **Self-contained**: Minimal dependencies, clear requirements
3. **Graceful**: Degrades gracefully when features unavailable
4. **Forgiving**: Anticipates and handles user mistakes
5. **Helpful**: Clear errors, good defaults, excellent docs
### Marketplace Success
1. **Discoverable**: Clear name, good description, searchable keywords
2. **Professional**: Polished presentation, consistent branding
3. **Reliable**: Tested thoroughly, handles edge cases
4. **Maintainable**: Versioned, updated regularly, supported
5. **User-focused**: Great UX, responsive to feedback
### Quality Standards
1. **Complete**: Fully documented, all features working
2. **Tested**: Works in real environments, edge cases handled
3. **Secure**: No vulnerabilities, safe operations
4. **Performant**: Reasonable speed, resource-efficient
5. **Ethical**: Privacy-respecting, user consent
With these considerations, commands become marketplace-ready and delight users across diverse environments and use cases.