✨ Add design-pattern-learner skill with auto-trigger
NEW SKILL: Design Pattern Learner

- Studies and implements web design patterns from external sources
- Fetches gists, repositories, and webpages
- Analyzes design tokens (colors, typography, spacing, effects)
- Extracts UI components (buttons, cards, modals, etc.)
- Generates implementation code (Tailwind, React, Vue)

Auto-Trigger Patterns:
- "study design from [URL]"
- "learn from [source]"
- "implement this design"
- "copy style from"
- "extract component from"

Integration:
- Works alongside ui-ux-pro-max for design guidance
- Uses codebase-indexer to find implementation locations
- Uses mcp-client for external content fetching
- Added to always-use-superpowers decision tree

Updated Files:
- skills/design-pattern-learner/skill.md - Complete skill documentation
- skills/design-pattern-learner/scripts/analyze.py - Pattern analyzer
- skills/design-pattern-learner/scripts/generate.py - Implementation generator
- skills/design-pattern-learner/README.md - Quick start guide
- ralph-integration/dispatch/auto-triggers.yml - Priority 7
- skills/always-use-superpowers/SKILL.md - Decision tree updated

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
453
skills/design-pattern-learner/scripts/analyze.py
Executable file
453
skills/design-pattern-learner/scripts/analyze.py
Executable file
@@ -0,0 +1,453 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Design Pattern Analyzer
|
||||
Fetches and analyzes design patterns from external sources
|
||||
"""
|
||||
|
||||
import json
import os
import re
import subprocess
import sys
from datetime import datetime
from html import unescape
from urllib.parse import urlparse
|
||||
|
||||
class DesignPatternAnalyzer:
    """Fetch a design source (gist, repository, or webpage) and extract
    design tokens, UI components, layout and animation patterns from it."""

    def __init__(self, source_url):
        """Remember the source URL and prepare an empty result record.

        Args:
            source_url: URL of the gist, repository, or webpage to study.
        """
        self.source_url = source_url
        # Accumulated analysis results, filled in by the analyze_* methods
        # and persisted by save_patterns().
        self.patterns = {
            "source": source_url,
            "timestamp": datetime.now().isoformat(),
            "design_tokens": {},
            "components": [],
            "layouts": [],
            "animations": [],
        }
|
||||
|
||||
def fetch_source(self):
    """Download the raw content behind ``self.source_url``.

    Dispatches to the gist, GitHub-repo, or generic-webpage fetcher based
    on the URL. Returns the fetched text, or None on failure.
    """
    print(f"📥 Fetching source: {self.source_url}")

    # Route by source type. Gists must be tested first: a gist URL also
    # contains the substring "github.com".
    url = self.source_url
    if "gist.github.com" in url:
        return self._fetch_gist()
    if "github.com" in url:
        return self._fetch_github_repo()
    return self._fetch_webpage()
|
||||
|
||||
def _fetch_gist(self):
    """Fetch a GitHub gist page over HTTP and extract its code.

    Returns the parsed gist content, or None when the gist ID cannot be
    determined or the download fails.
    """
    print("🔍 Detected: GitHub Gist")

    # Gist URLs come as gist.github.com/<user>/<id> or gist.github.com/<id>.
    match = (re.search(r'gist\.github\.com/[^/]+/([a-f0-9]+)', self.source_url)
             or re.search(r'gist\.github\.com/([a-f0-9]+)', self.source_url))
    if not match:
        print("❌ Could not extract gist ID")
        return None

    gist_url = f"https://gist.github.com/{match.group(1)}"

    # Shell out to curl rather than depending on an HTTP library.
    result = subprocess.run(
        ['curl', '-s', gist_url],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"❌ Failed to fetch gist: {result.stderr}")
        return None
    return self._parse_gist_html(result.stdout)
|
||||
|
||||
def _fetch_github_repo(self):
    """Fetch the landing page of a GitHub repository.

    Full repository analysis would require a clone; for now only the HTML
    of the main page is downloaded and handed to the page parser.
    Returns the parsed page text, or None when the download fails.
    """
    print("🔍 Detected: GitHub Repository")
    print("⚠️ Repo analysis requires cloning - will analyze README and structure")

    result = subprocess.run(
        ['curl', '-s', self.source_url],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"❌ Failed to fetch repo: {result.stderr}")
        return None
    return self._parse_github_page(result.stdout)
|
||||
|
||||
def _fetch_webpage(self):
    """Fetch an arbitrary webpage, following redirects (curl -L).

    Returns the raw response body, or None when curl reports an error.
    """
    print("🔍 Detected: Webpage")

    result = subprocess.run(
        ['curl', '-s', '-L', self.source_url],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"❌ Failed to fetch webpage: {result.stderr}")
        return None
    return result.stdout
|
||||
|
||||
def _parse_gist_html(self, html):
|
||||
"""Parse gist HTML to extract code"""
|
||||
print("📄 Parsing gist content...")
|
||||
|
||||
# Look for file content in gist HTML
|
||||
# Extract from <div class="highlight"> or similar
|
||||
file_pattern = r'<div class="[^"]*highlight[^"]*"[^>]*>(.*?)</div>'
|
||||
files = re.findall(file_pattern, html, re.DOTALL)
|
||||
|
||||
if files:
|
||||
# Clean up HTML entities and tags
|
||||
content = []
|
||||
for file_html in files:
|
||||
# Remove HTML tags but keep code
|
||||
clean_code = re.sub(r'<[^>]+>', '\n', file_html)
|
||||
clean_code = re.sub(r'<', '<', clean_code)
|
||||
clean_code = re.sub(r'>', '>', clean_code)
|
||||
clean_code = re.sub(r'&', '&', clean_code)
|
||||
clean_code = re.sub(r'\n\s*\n', '\n', clean_code)
|
||||
content.append(clean_code.strip())
|
||||
|
||||
return '\n\n'.join(content)
|
||||
else:
|
||||
print("⚠️ Could not extract gist files")
|
||||
return html
|
||||
|
||||
def _parse_github_page(self, html):
|
||||
"""Parse GitHub repo page"""
|
||||
print("📄 Parsing repository page...")
|
||||
return html
|
||||
|
||||
def analyze_design_tokens(self, content):
    """Collect design tokens (colors, typography, spacing, effects).

    Each extractor returns None when it finds nothing, in which case its
    category is omitted. The result is stored under
    ``self.patterns['design_tokens']`` and also returned.
    """
    print("\n🎨 Extracting Design Tokens...")

    tokens = {}

    # (key, extractor, success-message builder) — run in a fixed order so
    # output and token layout stay stable.
    extractors = [
        ('colors', self._extract_colors,
         lambda v: f" ✓ Found {len(v)} color patterns"),
        ('typography', self._extract_typography,
         lambda v: f" ✓ Found {len(v.get('fonts', []))} font patterns"),
        ('spacing', self._extract_spacing,
         lambda v: " ✓ Found spacing patterns"),
        ('effects', self._extract_effects,
         lambda v: f" ✓ Found {len(v.get('shadows', []))} shadow patterns"),
    ]
    for key, extract, describe in extractors:
        value = extract(content)
        if value:
            tokens[key] = value
            print(describe(value))

    self.patterns['design_tokens'] = tokens
    return tokens
|
||||
|
||||
def _extract_colors(self, content):
|
||||
"""Extract color patterns"""
|
||||
colors = {}
|
||||
|
||||
# Hex colors
|
||||
hex_colors = re.findall(r'#[0-9a-fA-F]{3,6}', content)
|
||||
if hex_colors:
|
||||
colors['hex'] = list(set(hex_colors))[:20] # Limit to 20 unique
|
||||
|
||||
# Tailwind color classes
|
||||
tailwind_colors = re.findall(r'\b(bg|text|border)-([a-z]+)-(\d+)\b', content)
|
||||
if tailwind_colors:
|
||||
colors['tailwind'] = [
|
||||
f"{prefix}-{color}-{shade}"
|
||||
for prefix, color, shade in tailwind_colors
|
||||
][:20]
|
||||
|
||||
# CSS color names
|
||||
css_colors = re.findall(r'\b(black|white|gray|red|blue|green|yellow|purple|pink|indigo)\b', content, re.I)
|
||||
if css_colors:
|
||||
colors['css'] = list(set(css_colors))
|
||||
|
||||
return colors if colors else None
|
||||
|
||||
def _extract_typography(self, content):
|
||||
"""Extract typography patterns"""
|
||||
typo = {}
|
||||
|
||||
# Font families
|
||||
fonts = re.findall(r'font-family:\s*([^;]+)', content, re.I)
|
||||
if fonts:
|
||||
typo['fonts'] = list(set(fonts))
|
||||
|
||||
# Font sizes
|
||||
sizes = re.findall(r'(text-|font-)?size:\s*([^;]+)', content, re.I)
|
||||
if sizes:
|
||||
typo['sizes'] = list(set([s[1] for s in sizes]))
|
||||
|
||||
# Tailwind text classes
|
||||
tailwind_text = re.findall(r'\b(text|font)-(xl|lg|md|sm|xs|\d)\b', content)
|
||||
if tailwind_text:
|
||||
typo['tailwind'] = [f"{prefix}-{size}" for prefix, size in tailwind_text]
|
||||
|
||||
return typo if typo else None
|
||||
|
||||
def _extract_spacing(self, content):
|
||||
"""Extract spacing patterns"""
|
||||
spacing = {}
|
||||
|
||||
# Tailwind spacing
|
||||
tailwind_spacing = re.findall(r'\b(p|m|px|py|pt|pb|pl|pr)-(\\d+)\b', content)
|
||||
if tailwind_spacing:
|
||||
spacing['tailwind'] = list(set([f"{prefix}-{size}" for prefix, size in tailwind_spacing]))
|
||||
|
||||
# CSS spacing
|
||||
css_spacing = re.findall(r'(padding|margin):\s*([^;]+)', content, re.I)
|
||||
if css_spacing:
|
||||
spacing['css'] = list(set([f"{prop}: {val}" for prop, val in css_spacing]))
|
||||
|
||||
return spacing if spacing else None
|
||||
|
||||
def _extract_effects(self, content):
|
||||
"""Extract visual effects"""
|
||||
effects = {}
|
||||
|
||||
# Shadows
|
||||
shadows = re.findall(r'box-shadow:\s*([^;]+)', content, re.I)
|
||||
if shadows:
|
||||
effects['shadows'] = list(set(shadows))
|
||||
|
||||
# Tailwind shadows
|
||||
tailwind_shadows = re.findall(r'\bshadow-(xl|lg|md|sm|none)\b', content)
|
||||
if tailwind_shadows:
|
||||
effects['tailwind_shadows'] = [f"shadow-{s}" for s in tailwind_shadows]
|
||||
|
||||
# Border radius
|
||||
radius = re.findall(r'border-radius:\s*([^;]+)', content, re.I)
|
||||
if radius:
|
||||
effects['radius'] = list(set(radius))
|
||||
|
||||
return effects if effects else None
|
||||
|
||||
def analyze_components(self, content):
    """Detect common UI components (buttons, cards, modals, ...) in content.

    Records matches under ``self.patterns['components']`` and returns the
    same list; each entry holds the component type, match count, and up
    to three example snippets.
    """
    print("\n🧩 Identifying Components...")

    # Heuristic patterns: either the HTML tag itself or a class attribute
    # containing the component keyword.
    component_patterns = {
        'button': r'<button[^>]*>.*?</button>|class="[^"]*button[^"]*"',
        'card': r'class="[^"]*card[^"]*"',
        'modal': r'class="[^"]*modal[^"]*"',
        'nav': r'<nav[^>]*>.*?</nav>|class="[^"]*nav[^"]*"',
        'form': r'<form[^>]*>.*?</form>|class="[^"]*form[^"]*"',
        'input': r'<input[^>]*/?>|class="[^"]*input[^"]*"',
        'hero': r'class="[^"]*hero[^"]*"',
        'footer': r'<footer[^>]*>.*?</footer>',
        'header': r'<header[^>]*>.*?</header>',
    }

    components = []
    for comp_name, pattern in component_patterns.items():
        matches = re.findall(pattern, content, re.DOTALL | re.IGNORECASE)
        if not matches:
            continue
        components.append({
            'type': comp_name,
            'count': len(matches),
            'examples': matches[:3],  # keep only the first few snippets
        })
        print(f" ✓ Found {len(matches)} {comp_name} component(s)")

    self.patterns['components'] = components
    return components
|
||||
|
||||
def analyze_layouts(self, content):
    """Detect layout patterns (CSS grid, flexbox, containers).

    Records results under ``self.patterns['layouts']`` and returns them.
    """
    print("\n📐 Analyzing Layouts...")

    layouts = []

    # Tailwind grid column utilities. BUGFIX: the previous pattern used
    # '\\d' inside a raw string (literal backslash + 'd'), so it never
    # matched real classes such as grid-cols-3.
    grids = re.findall(r'grid-cols-\d+', content)
    if grids:
        variants = list(dict.fromkeys(grids))  # deterministic dedupe
        layouts.append({
            'type': 'grid',
            'variants': variants
        })
        print(f" ✓ Found grid layouts: {variants}")

    # Tailwind flexbox direction/wrap utilities.
    flex_patterns = re.findall(r'flex-(row|col|wrap|nowrap)', content)
    if flex_patterns:
        flex_variants = list(dict.fromkeys(flex_patterns))
        layouts.append({
            'type': 'flexbox',
            'patterns': flex_variants
        })
        print(f" ✓ Found flexbox patterns: {flex_variants}")

    # Generic container classes.
    containers = re.findall(r'class="[^"]*container[^"]*"', content)
    if containers:
        layouts.append({
            'type': 'container',
            'count': len(containers)
        })
        print(f" ✓ Found {len(containers)} container(s)")

    self.patterns['layouts'] = layouts
    return layouts
|
||||
|
||||
def analyze_animations(self, content):
    """Detect animation patterns (transitions, transforms, keyframes).

    Records results under ``self.patterns['animations']`` and returns them.
    """
    print("\n✨ Analyzing Animations...")

    animations = []

    # CSS transition / transition-* declarations. BUGFIX: the patterns in
    # this method previously used '\\s' / '\\w' inside raw strings
    # (literal backslashes), so they never matched real CSS.
    transitions = re.findall(r'transition(?:-[^:]*)?:\s*([^;]+)', content, re.I)
    if transitions:
        unique_transitions = list(dict.fromkeys(transitions))  # stable order
        animations.append({
            'type': 'transition',
            'patterns': unique_transitions[:10]
        })
        print(f" ✓ Found transitions: {len(unique_transitions)}")

    # CSS transform declarations.
    transforms = re.findall(r'transform:\s*([^;]+)', content, re.I)
    if transforms:
        unique_transforms = list(dict.fromkeys(transforms))
        animations.append({
            'type': 'transform',
            'patterns': unique_transforms
        })
        print(f" ✓ Found transforms: {len(unique_transforms)}")

    # @keyframes definitions (capture the animation name).
    keyframes = re.findall(r'@keyframes\s+(\w+)', content)
    if keyframes:
        names = list(dict.fromkeys(keyframes))
        animations.append({
            'type': 'keyframe',
            'names': names
        })
        print(f" ✓ Found keyframes: {names}")

    self.patterns['animations'] = animations
    return animations
|
||||
|
||||
def save_patterns(self):
    """Persist the collected patterns as JSON and append to the learning log.

    Writes a timestamped JSON file under the skill's data directory and
    appends a one-line summary record to learning-history.jsonl.

    Returns:
        Filesystem path of the JSON file that was written.
    """
    # Ensure the destination directory exists (makedirs also creates the
    # parent directory that holds the history file).
    data_dir = os.path.expanduser('~/.claude/skills/design-pattern-learner/data/patterns')
    os.makedirs(data_dir, exist_ok=True)

    # Derive a filesystem-safe, timestamped filename from the source URL.
    safe_name = re.sub(r'[^a-zA-Z0-9]', '_', self.source_url)[:50]
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f"{safe_name}_{timestamp}.json"
    filepath = os.path.join(data_dir, filename)

    # BUGFIX: write UTF-8 explicitly; the platform default encoding can
    # fail on non-ASCII content (e.g. emoji or CSS text on Windows).
    with open(filepath, 'w', encoding='utf-8') as f:
        json.dump(self.patterns, f, indent=2, ensure_ascii=False)

    print(f"\n💾 Patterns saved to: {filepath}")

    # Append a compact record to the learning history (JSON Lines format).
    history_file = os.path.expanduser('~/.claude/skills/design-pattern-learner/learning-history.jsonl')
    with open(history_file, 'a', encoding='utf-8') as f:
        f.write(json.dumps({
            'timestamp': datetime.now().isoformat(),
            'source': self.source_url,
            'file': filename,
            'patterns_count': len(self.patterns['components']),
            'success': True
        }) + '\n')

    return filepath
|
||||
|
||||
def generate_summary(self):
    """Print a human-readable report of everything that was extracted."""
    banner = "=" * 60
    print("\n" + banner)
    print("📊 DESIGN PATTERN ANALYSIS SUMMARY")
    print(banner)

    print(f"\n📍 Source: {self.source_url}")
    print(f"🕐 Analyzed: {self.patterns['timestamp']}")

    # Each section is printed only when it has content.
    tokens = self.patterns['design_tokens']
    if tokens:
        print("\n🎨 Design Tokens:")
        for category, values in tokens.items():
            print(f" • {category}: {len(values)} patterns")

    components = self.patterns['components']
    if components:
        print(f"\n🧩 Components Found: {len(components)}")
        for comp in components:
            print(f" • {comp['type']}: {comp['count']} instance(s)")

    layouts = self.patterns['layouts']
    if layouts:
        print(f"\n📐 Layout Patterns: {len(layouts)}")
        for layout in layouts:
            print(f" • {layout['type']}")

    animations = self.patterns['animations']
    if animations:
        print(f"\n✨ Animations: {len(animations)}")
        for anim in animations:
            print(f" • {anim['type']}")

    print("\n" + banner)
    print("✅ Analysis Complete!")
    print(banner)
|
||||
|
||||
def main():
    """CLI entry point: fetch a URL, analyze it, then save and summarize."""
    if len(sys.argv) < 2:
        print("Usage: python3 analyze.py <URL>")
        print("Example: python3 analyze.py https://gist.github.com/user/id")
        sys.exit(1)

    analyzer = DesignPatternAnalyzer(sys.argv[1])

    # Download the source; bail out early when nothing came back.
    content = analyzer.fetch_source()
    if not content:
        print("❌ Failed to fetch content")
        sys.exit(1)

    # Run every analysis pass over the fetched content.
    for analyze in (analyzer.analyze_design_tokens,
                    analyzer.analyze_components,
                    analyzer.analyze_layouts,
                    analyzer.analyze_animations):
        analyze(content)

    filepath = analyzer.save_patterns()
    analyzer.generate_summary()

    print(f"\n💡 Next steps:")
    print(f" • View patterns: cat {filepath}")
    print(f" • Generate implementation: python3 generate.py --source {filepath}")


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user