SuperCharge Claude Code v1.0.0 - Complete Customization Package
Features:
- 30+ custom skills (cognitive, development, UI/UX, autonomous agents)
- RalphLoop autonomous agent integration
- Multi-AI consultation (Qwen)
- Agent management system with sync capabilities
- Custom hooks for session management
- MCP servers integration
- Plugin marketplace setup
- Comprehensive installation script

Components:
- Skills: always-use-superpowers, ralph, brainstorming, ui-ux-pro-max, etc.
- Agents: 100+ agents across engineering, marketing, product, etc.
- Hooks: session-start-superpowers, qwen-consult, ralph-auto-trigger
- Commands: /brainstorm, /write-plan, /execute-plan
- MCP Servers: zai-mcp-server, web-search-prime, web-reader, zread
- Binaries: ralphloop wrapper

Installation: ./supercharge.sh
skills/ui-ux-pro-max/scripts/core.py (normal file, 258 lines added)
@@ -0,0 +1,258 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
UI/UX Pro Max Core - BM25 search engine for UI/UX style guides
"""

import csv
import re
from pathlib import Path
from math import log
from collections import defaultdict

# ============ CONFIGURATION ============
DATA_DIR = Path(__file__).parent.parent / "data"
MAX_RESULTS = 3

CSV_CONFIG = {
    "style": {
        "file": "styles.csv",
        "search_cols": ["Style Category", "Keywords", "Best For", "Type"],
        "output_cols": ["Style Category", "Type", "Keywords", "Primary Colors", "Effects & Animation", "Best For", "Performance", "Accessibility", "Framework Compatibility", "Complexity"]
    },
    "prompt": {
        "file": "prompts.csv",
        "search_cols": ["Style Category", "AI Prompt Keywords (Copy-Paste Ready)", "CSS/Technical Keywords"],
        "output_cols": ["Style Category", "AI Prompt Keywords (Copy-Paste Ready)", "CSS/Technical Keywords", "Implementation Checklist"]
    },
    "color": {
        "file": "colors.csv",
        "search_cols": ["Product Type", "Keywords", "Notes"],
        "output_cols": ["Product Type", "Keywords", "Primary (Hex)", "Secondary (Hex)", "CTA (Hex)", "Background (Hex)", "Text (Hex)", "Border (Hex)", "Notes"]
    },
    "chart": {
        "file": "charts.csv",
        "search_cols": ["Data Type", "Keywords", "Best Chart Type", "Accessibility Notes"],
        "output_cols": ["Data Type", "Keywords", "Best Chart Type", "Secondary Options", "Color Guidance", "Accessibility Notes", "Library Recommendation", "Interactive Level"]
    },
    "landing": {
        "file": "landing.csv",
        "search_cols": ["Pattern Name", "Keywords", "Conversion Optimization", "Section Order"],
        "output_cols": ["Pattern Name", "Keywords", "Section Order", "Primary CTA Placement", "Color Strategy", "Conversion Optimization"]
    },
    "product": {
        "file": "products.csv",
        "search_cols": ["Product Type", "Keywords", "Primary Style Recommendation", "Key Considerations"],
        "output_cols": ["Product Type", "Keywords", "Primary Style Recommendation", "Secondary Styles", "Landing Page Pattern", "Dashboard Style (if applicable)", "Color Palette Focus"]
    },
    "ux": {
        "file": "ux-guidelines.csv",
        "search_cols": ["Category", "Issue", "Description", "Platform"],
        "output_cols": ["Category", "Issue", "Platform", "Description", "Do", "Don't", "Code Example Good", "Code Example Bad", "Severity"]
    },
    "typography": {
        "file": "typography.csv",
        "search_cols": ["Font Pairing Name", "Category", "Mood/Style Keywords", "Best For", "Heading Font", "Body Font"],
        "output_cols": ["Font Pairing Name", "Category", "Heading Font", "Body Font", "Mood/Style Keywords", "Best For", "Google Fonts URL", "CSS Import", "Tailwind Config", "Notes"]
    },
    "icons": {
        "file": "icons.csv",
        "search_cols": ["Category", "Icon Name", "Keywords", "Best For"],
        "output_cols": ["Category", "Icon Name", "Keywords", "Library", "Import Code", "Usage", "Best For", "Style"]
    },
    "react": {
        "file": "react-performance.csv",
        "search_cols": ["Category", "Issue", "Keywords", "Description"],
        "output_cols": ["Category", "Issue", "Platform", "Description", "Do", "Don't", "Code Example Good", "Code Example Bad", "Severity"]
    },
    "web": {
        "file": "web-interface.csv",
        "search_cols": ["Category", "Issue", "Keywords", "Description"],
        "output_cols": ["Category", "Issue", "Platform", "Description", "Do", "Don't", "Code Example Good", "Code Example Bad", "Severity"]
    }
}

STACK_CONFIG = {
    "html-tailwind": {"file": "stacks/html-tailwind.csv"},
    "react": {"file": "stacks/react.csv"},
    "nextjs": {"file": "stacks/nextjs.csv"},
    "vue": {"file": "stacks/vue.csv"},
    "nuxtjs": {"file": "stacks/nuxtjs.csv"},
    "nuxt-ui": {"file": "stacks/nuxt-ui.csv"},
    "svelte": {"file": "stacks/svelte.csv"},
    "swiftui": {"file": "stacks/swiftui.csv"},
    "react-native": {"file": "stacks/react-native.csv"},
    "flutter": {"file": "stacks/flutter.csv"},
    "shadcn": {"file": "stacks/shadcn.csv"},
    "jetpack-compose": {"file": "stacks/jetpack-compose.csv"}
}

# Common columns for all stacks
_STACK_COLS = {
    "search_cols": ["Category", "Guideline", "Description", "Do", "Don't"],
    "output_cols": ["Category", "Guideline", "Description", "Do", "Don't", "Code Good", "Code Bad", "Severity", "Docs URL"]
}

AVAILABLE_STACKS = list(STACK_CONFIG.keys())


# ============ BM25 IMPLEMENTATION ============
class BM25:
    """BM25 ranking algorithm for text search"""

    def __init__(self, k1=1.5, b=0.75):
        self.k1 = k1
        self.b = b
        self.corpus = []
        self.doc_lengths = []
        self.avgdl = 0
        self.idf = {}
        self.doc_freqs = defaultdict(int)
        self.N = 0

    def tokenize(self, text):
        """Lowercase, split, remove punctuation, filter short words"""
        text = re.sub(r'[^\w\s]', ' ', str(text).lower())
        return [w for w in text.split() if len(w) > 2]

    def fit(self, documents):
        """Build BM25 index from documents"""
        self.corpus = [self.tokenize(doc) for doc in documents]
        self.N = len(self.corpus)
        if self.N == 0:
            return
        self.doc_lengths = [len(doc) for doc in self.corpus]
        self.avgdl = sum(self.doc_lengths) / self.N

        for doc in self.corpus:
            seen = set()
            for word in doc:
                if word not in seen:
                    self.doc_freqs[word] += 1
                    seen.add(word)

        for word, freq in self.doc_freqs.items():
            self.idf[word] = log((self.N - freq + 0.5) / (freq + 0.5) + 1)

    def score(self, query):
        """Score all documents against query"""
        query_tokens = self.tokenize(query)
        scores = []

        for idx, doc in enumerate(self.corpus):
            score = 0
            doc_len = self.doc_lengths[idx]
            term_freqs = defaultdict(int)
            for word in doc:
                term_freqs[word] += 1

            for token in query_tokens:
                if token in self.idf:
                    tf = term_freqs[token]
                    idf = self.idf[token]
                    numerator = tf * (self.k1 + 1)
                    denominator = tf + self.k1 * (1 - self.b + self.b * doc_len / self.avgdl)
                    score += idf * numerator / denominator

            scores.append((idx, score))

        return sorted(scores, key=lambda x: x[1], reverse=True)


# ============ SEARCH FUNCTIONS ============
def _load_csv(filepath):
    """Load CSV and return list of dicts"""
    with open(filepath, 'r', encoding='utf-8') as f:
        return list(csv.DictReader(f))


def _search_csv(filepath, search_cols, output_cols, query, max_results):
    """Core search function using BM25"""
    if not filepath.exists():
        return []

    data = _load_csv(filepath)

    # Build documents from search columns
    documents = [" ".join(str(row.get(col, "")) for col in search_cols) for row in data]

    # BM25 search
    bm25 = BM25()
    bm25.fit(documents)
    ranked = bm25.score(query)

    # Get top results with score > 0
    results = []
    for idx, score in ranked[:max_results]:
        if score > 0:
            row = data[idx]
            results.append({col: row.get(col, "") for col in output_cols if col in row})

    return results


def detect_domain(query):
    """Auto-detect the most relevant domain from query"""
    query_lower = query.lower()

    domain_keywords = {
        "color": ["color", "palette", "hex", "#", "rgb"],
        "chart": ["chart", "graph", "visualization", "trend", "bar", "pie", "scatter", "heatmap", "funnel"],
        "landing": ["landing", "page", "cta", "conversion", "hero", "testimonial", "pricing", "section"],
        "product": ["saas", "ecommerce", "e-commerce", "fintech", "healthcare", "gaming", "portfolio", "crypto", "dashboard"],
        "prompt": ["prompt", "css", "implementation", "variable", "checklist", "tailwind"],
        "style": ["style", "design", "ui", "minimalism", "glassmorphism", "neumorphism", "brutalism", "dark mode", "flat", "aurora"],
        "ux": ["ux", "usability", "accessibility", "wcag", "touch", "scroll", "animation", "keyboard", "navigation", "mobile"],
        "typography": ["font", "typography", "heading", "serif", "sans"],
        "icons": ["icon", "icons", "lucide", "heroicons", "symbol", "glyph", "pictogram", "svg icon"],
        "react": ["react", "next.js", "nextjs", "suspense", "memo", "usecallback", "useeffect", "rerender", "bundle", "waterfall", "barrel", "dynamic import", "rsc", "server component"],
        "web": ["aria", "focus", "outline", "semantic", "virtualize", "autocomplete", "form", "input type", "preconnect"]
    }

    scores = {domain: sum(1 for kw in keywords if kw in query_lower) for domain, keywords in domain_keywords.items()}
    best = max(scores, key=scores.get)
    return best if scores[best] > 0 else "style"


def search(query, domain=None, max_results=MAX_RESULTS):
    """Main search function with auto-domain detection"""
    if domain is None:
        domain = detect_domain(query)

    config = CSV_CONFIG.get(domain, CSV_CONFIG["style"])
    filepath = DATA_DIR / config["file"]

    if not filepath.exists():
        return {"error": f"File not found: {filepath}", "domain": domain}

    results = _search_csv(filepath, config["search_cols"], config["output_cols"], query, max_results)

    return {
        "domain": domain,
        "query": query,
        "file": config["file"],
        "count": len(results),
        "results": results
    }


def search_stack(query, stack, max_results=MAX_RESULTS):
    """Search stack-specific guidelines"""
    if stack not in STACK_CONFIG:
        return {"error": f"Unknown stack: {stack}. Available: {', '.join(AVAILABLE_STACKS)}"}

    filepath = DATA_DIR / STACK_CONFIG[stack]["file"]

    if not filepath.exists():
        return {"error": f"Stack file not found: {filepath}", "stack": stack}

    results = _search_csv(filepath, _STACK_COLS["search_cols"], _STACK_COLS["output_cols"], query, max_results)

    return {
        "domain": "stack",
        "stack": stack,
        "query": query,
        "file": STACK_CONFIG[stack]["file"],
        "count": len(results),
        "results": results
    }
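For orientation, a minimal usage sketch of the module above (not part of this commit; it assumes core.py is importable from the scripts directory and that the CSV files exist under data/):

from core import search, search_stack

# Domain search with auto-detection; the result dict carries domain, file, count, results
result = search("glassmorphism dashboard ui")
print(result.get("domain"), result.get("file"), result.get("count"))
for row in result.get("results", []):
    print(row)

# Stack-specific guidelines; the stack must be one of STACK_CONFIG's keys
stack_result = search_stack("list rendering performance", "react", max_results=2)
print(stack_result.get("error") or stack_result["results"])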
skills/ui-ux-pro-max/scripts/design_system.py (normal file, 1067 lines added)
File diff suppressed because it is too large.
skills/ui-ux-pro-max/scripts/search.py (executable file, 106 lines added)
@@ -0,0 +1,106 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
UI/UX Pro Max Search - BM25 search engine for UI/UX style guides
Usage: python search.py "<query>" [--domain <domain>] [--stack <stack>] [--max-results 3]
       python search.py "<query>" --design-system [-p "Project Name"]
       python search.py "<query>" --design-system --persist [-p "Project Name"] [--page "dashboard"]

Domains: style, prompt, color, chart, landing, product, ux, typography, icons, react, web
Stacks: html-tailwind, react, nextjs, vue, nuxtjs, nuxt-ui, svelte, swiftui, react-native, flutter, shadcn, jetpack-compose

Persistence (Master + Overrides pattern):
  --persist    Save design system to design-system/MASTER.md
  --page       Also create a page-specific override file in design-system/pages/
"""

import argparse
from core import CSV_CONFIG, AVAILABLE_STACKS, MAX_RESULTS, search, search_stack
from design_system import generate_design_system, persist_design_system


def format_output(result):
    """Format results for Claude consumption (token-optimized)"""
    if "error" in result:
        return f"Error: {result['error']}"

    output = []
    if result.get("stack"):
        output.append(f"## UI Pro Max Stack Guidelines")
        output.append(f"**Stack:** {result['stack']} | **Query:** {result['query']}")
    else:
        output.append(f"## UI Pro Max Search Results")
        output.append(f"**Domain:** {result['domain']} | **Query:** {result['query']}")
    output.append(f"**Source:** {result['file']} | **Found:** {result['count']} results\n")

    for i, row in enumerate(result['results'], 1):
        output.append(f"### Result {i}")
        for key, value in row.items():
            value_str = str(value)
            if len(value_str) > 300:
                value_str = value_str[:300] + "..."
            output.append(f"- **{key}:** {value_str}")
        output.append("")

    return "\n".join(output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="UI Pro Max Search")
    parser.add_argument("query", help="Search query")
    parser.add_argument("--domain", "-d", choices=list(CSV_CONFIG.keys()), help="Search domain")
    parser.add_argument("--stack", "-s", choices=AVAILABLE_STACKS, help="Stack-specific search (html-tailwind, react, nextjs)")
    parser.add_argument("--max-results", "-n", type=int, default=MAX_RESULTS, help="Max results (default: 3)")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    # Design system generation
    parser.add_argument("--design-system", "-ds", action="store_true", help="Generate complete design system recommendation")
    parser.add_argument("--project-name", "-p", type=str, default=None, help="Project name for design system output")
    parser.add_argument("--format", "-f", choices=["ascii", "markdown"], default="ascii", help="Output format for design system")
    # Persistence (Master + Overrides pattern)
    parser.add_argument("--persist", action="store_true", help="Save design system to design-system/MASTER.md (creates hierarchical structure)")
    parser.add_argument("--page", type=str, default=None, help="Create page-specific override file in design-system/pages/")
    parser.add_argument("--output-dir", "-o", type=str, default=None, help="Output directory for persisted files (default: current directory)")

    args = parser.parse_args()

    # Design system takes priority
    if args.design_system:
        result = generate_design_system(
            args.query,
            args.project_name,
            args.format,
            persist=args.persist,
            page=args.page,
            output_dir=args.output_dir
        )
        print(result)

        # Print persistence confirmation
        if args.persist:
            project_slug = args.project_name.lower().replace(' ', '-') if args.project_name else "default"
            print("\n" + "=" * 60)
            print(f"✅ Design system persisted to design-system/{project_slug}/")
            print(f"   📄 design-system/{project_slug}/MASTER.md (Global Source of Truth)")
            if args.page:
                page_filename = args.page.lower().replace(' ', '-')
                print(f"   📄 design-system/{project_slug}/pages/{page_filename}.md (Page Overrides)")
            print("")
            print(f"📖 Usage: When building a page, check design-system/{project_slug}/pages/[page].md first.")
            print(f"   If exists, its rules override MASTER.md. Otherwise, use MASTER.md.")
            print("=" * 60)
    # Stack search
    elif args.stack:
        result = search_stack(args.query, args.stack, args.max_results)
        if args.json:
            import json
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print(format_output(result))
    # Domain search
    else:
        result = search(args.query, args.domain, args.max_results)
        if args.json:
            import json
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print(format_output(result))
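A small sketch of how another tool might consume the CLI's --json output (again not part of this commit; it assumes search.py is run from its own directory with python3 on PATH and the data files in place):

import json
import subprocess

# Run the CLI in JSON mode and parse the result dict (domain, query, file, count, results)
proc = subprocess.run(
    ["python3", "search.py", "fintech dashboard", "--domain", "color", "--json"],
    capture_output=True, text=True, check=True,
)
payload = json.loads(proc.stdout)
for row in payload.get("results", []):
    print(row.get("Product Type"), row.get("Primary (Hex)"))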
skills/ui-ux-pro-max/scripts/wordpress_safe_update.sh (executable file, 82 lines added)
@@ -0,0 +1,82 @@
#!/bin/bash

# WordPress Safe Content Update Script
# Prevents encoding issues when updating WordPress post content
# Usage: ./wordpress_safe_update.sh <post_id> <html_file_path>

set -euo pipefail

# Configuration
DB_NAME="wordpress"
DB_USER="root"
TABLE_PREFIX="wp_"

# Check arguments
if [ $# -ne 2 ]; then
    echo "Usage: $0 <post_id> <html_file_path>"
    echo "Example: $0 112 /tmp/article_content.html"
    exit 1
fi

POST_ID="$1"
HTML_FILE="$2"

# Verify file exists
if [ ! -f "$HTML_FILE" ]; then
    echo "Error: File not found: $HTML_FILE"
    exit 1
fi

# Verify file is readable
if [ ! -r "$HTML_FILE" ]; then
    echo "Error: File is not readable: $HTML_FILE"
    exit 1
fi

# Check file encoding and convert from the detected encoding if it is not UTF-8
FILE_ENCODING=$(file -b --mime-encoding "$HTML_FILE")
if [[ ! "$FILE_ENCODING" =~ utf-8 ]]; then
    echo "Warning: File encoding is $FILE_ENCODING, expected utf-8"
    echo "Converting to UTF-8..."
    iconv -f "$FILE_ENCODING" -t UTF-8 "$HTML_FILE" > "${HTML_FILE}.utf8"
    mv "${HTML_FILE}.utf8" "$HTML_FILE"
fi

# Verify no literal escape sequences
if grep -q '\\n' "$HTML_FILE"; then
    echo "Error: File contains literal \\n escape sequences"
    echo "Please fix the file before updating WordPress"
    exit 1
fi

# Backup current content
echo "Backing up current content..."
sudo mysql "${DB_NAME}" --skip-column-names --raw \
    -e "SELECT post_content FROM ${TABLE_PREFIX}posts WHERE ID = ${POST_ID};" \
    > "/tmp/wp_post_${POST_ID}_backup_$(date +%Y%m%d_%H%M%S).html"

# Update post content
echo "Updating post ID ${POST_ID}..."
sudo mysql "${DB_NAME}" \
    -e "UPDATE ${TABLE_PREFIX}posts SET post_content = LOAD_FILE('${HTML_FILE}') WHERE ID = ${POST_ID};"

# Verify update
RESULT=$(sudo mysql "${DB_NAME}" --skip-column-names --raw \
    -e "SELECT post_content FROM ${TABLE_PREFIX}posts WHERE ID = ${POST_ID};" \
    | head -20)

if echo "$RESULT" | grep -q '<!DOCTYPE\|<section\|<div'; then
    echo "✓ Update successful!"
    echo "✓ Post ID ${POST_ID} has been updated"
    echo "✓ Content verified"
else
    echo "✗ Update may have failed - please verify manually"
    exit 1
fi

echo ""
echo "Verification preview:"
echo "$RESULT"
echo ""
echo "To view full content:"
echo "sudo mysql ${DB_NAME} --skip-column-names --raw -e \"SELECT post_content FROM ${TABLE_PREFIX}posts WHERE ID = ${POST_ID};\""