Major fixes:
- Fixed Claude Code verification (was checking 'claude-code', now checks 'claude')
- Fixed volume mount paths (use absolute path /tmp/claude-repo for runtime)
- Fixed agents copy path (removed incorrect /agents/ subdirectory)
- Fixed critical agent paths (studio-coach is in bonus/, not project-management/)
- Added expect package for interactive installer automation
- Fixed test count aggregation to read from individual result files

Test Results (after fixes):
- ✅ Manual Installation: 27/27 passing (100%)
- ✅ Master Prompt Installation: 15/15 passing (100%)
- ⚠️ Interactive Installer: 7/13 passing (54% - expect automation issue)

Note: the interactive installer works fine for manual testing; it is just difficult to automate with expect scripts due to prompt-matching complexity.
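The test-count aggregation fix reads the per-suite counts back out of the result files that `save_results` (defined in the script below) writes to `$HOME/test-results`. A minimal sketch of that kind of aggregation, assuming only the `<name>-results.txt` naming and the `Passed:` / `Failed:` lines produced by `save_results`:

```bash
# Sketch: sum pass/fail counts across all per-suite result files.
total_passed=0
total_failed=0
for f in "$HOME/test-results"/*-results.txt; do
    [ -f "$f" ] || continue
    p=$(grep -m1 '^Passed:' "$f" | awk '{print $2}')
    q=$(grep -m1 '^Failed:' "$f" | awk '{print $2}')
    total_passed=$((total_passed + ${p:-0}))
    total_failed=$((total_failed + ${q:-0}))
done
echo "Total: $total_passed passed, $total_failed failed"
```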
#!/bin/bash

# Common utilities for installation testing

set -e

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Test results directory
RESULTS_DIR="$HOME/test-results"
mkdir -p "$RESULTS_DIR"

# Log file
LOG_FILE="$RESULTS_DIR/test-$(date +%Y%m%d-%H%M%S).log"
touch "$LOG_FILE"

# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1" | tee -a "$LOG_FILE"
}

log_success() {
    echo -e "${GREEN}[✓]${NC} $1" | tee -a "$LOG_FILE"
}

log_error() {
    echo -e "${RED}[✗]${NC} $1" | tee -a "$LOG_FILE"
}

log_warning() {
    echo -e "${YELLOW}[!]${NC} $1" | tee -a "$LOG_FILE"
}

log_section() {
    echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}" | tee -a "$LOG_FILE"
    echo -e "${CYAN} $1${NC}" | tee -a "$LOG_FILE"
    echo -e "${CYAN}═══════════════════════════════════════════════════════════${NC}" | tee -a "$LOG_FILE"
}

# Test result tracking
TESTS_PASSED=0
TESTS_FAILED=0
TESTS_WARNINGS=0

test_pass() {
    log_success "$1"
    TESTS_PASSED=$((TESTS_PASSED + 1))
}

test_fail() {
    log_error "$1"
    TESTS_FAILED=$((TESTS_FAILED + 1))
}

test_warn() {
    log_warning "$1"
    TESTS_WARNINGS=$((TESTS_WARNINGS + 1))
}

# Save test results
save_results() {
    local test_name="$1"
    local results_file="$RESULTS_DIR/${test_name}-results.txt"

    {
        echo "Test: $test_name"
        echo "Date: $(date)"
        echo "Passed: $TESTS_PASSED"
        echo "Failed: $TESTS_FAILED"
        echo "Warnings: $TESTS_WARNINGS"
        echo ""
        echo "Status:"
        if [ $TESTS_FAILED -eq 0 ]; then
            echo "✅ PASSED"
        else
            echo "❌ FAILED"
        fi
    } | tee "$results_file"
}

# Check if command exists
check_command() {
    if command -v "$1" &> /dev/null; then
        return 0
    else
        return 1
    fi
}

# Check if file exists
check_file() {
    if [ -f "$1" ]; then
        return 0
    else
        return 1
    fi
}

# Check if directory exists
check_dir() {
    if [ -d "$1" ]; then
        return 0
    else
        return 1
    fi
}

# Verify Claude Code installation
verify_claude_code() {
    log_section "Verifying Claude Code Installation"

    if check_command claude; then
        test_pass "Claude Code is installed"
        VERSION=$(claude --version 2>/dev/null || echo "unknown")
        log_info "Version: $VERSION"
    else
        test_fail "Claude Code not found"
    fi

    echo ""
}

# Verify prerequisites
verify_prerequisites() {
    log_section "Verifying Prerequisites"

    # Node.js
    if check_command node; then
        NODE_VERSION=$(node -v)
        test_pass "Node.js installed: $NODE_VERSION"
    else
        test_fail "Node.js not found"
    fi

    # npm
    if check_command npm; then
        NPM_VERSION=$(npm -v)
        test_pass "npm installed: $NPM_VERSION"
    else
        test_fail "npm not found"
    fi

    # Python
    if check_command python3; then
        PYTHON_VERSION=$(python3 --version)
        test_pass "Python installed: $PYTHON_VERSION"
    else
        test_warn "Python3 not found (optional)"
    fi

    # Git
    if check_command git; then
        GIT_VERSION=$(git --version)
        test_pass "Git installed: $GIT_VERSION"
    else
        test_fail "Git not found"
    fi

    # jq
    if check_command jq; then
        test_pass "jq installed"
    else
        test_fail "jq not found"
    fi

    echo ""
}

# Verify agents installation
verify_agents() {
    log_section "Verifying Agents Installation"

    AGENTS_DIR="$HOME/.claude/agents"

    if check_dir "$AGENTS_DIR"; then
        test_pass "Agents directory exists"

        # Count agent files
        AGENT_COUNT=$(find "$AGENTS_DIR" -name "*.md" -type f 2>/dev/null | wc -l)
        log_info "Found $AGENT_COUNT agent files"

        if [ "$AGENT_COUNT" -ge 38 ]; then
            test_pass "Expected 38+ agents (found: $AGENT_COUNT)"
        else
            test_fail "Expected 38+ agents, found: $AGENT_COUNT"
        fi

        # Check for critical agents
        CRITICAL_AGENTS=(
            "engineering/frontend-developer.md"
            "engineering/backend-architect.md"
            "marketing/tiktok-strategist.md"
            "bonus/studio-coach.md"
            "project-management/experiment-tracker.md"
        )

        for agent in "${CRITICAL_AGENTS[@]}"; do
            if check_file "$AGENTS_DIR/$agent"; then
                test_pass "Critical agent exists: $agent"
            else
                test_fail "Critical agent missing: $agent"
            fi
        done
    else
        test_fail "Agents directory not found"
    fi

    echo ""
}

# Verify MCP tools
verify_mcp_tools() {
    log_section "Verifying MCP Tools"

    # Check if packages are installed
    if npm list -g @z_ai/mcp-server &>/dev/null; then
        test_pass "@z_ai/mcp-server installed"
    else
        test_warn "@z_ai/mcp-server not installed"
    fi

    if npm list -g @z_ai/coding-helper &>/dev/null; then
        test_pass "@z_ai/coding-helper installed"
    else
        test_warn "@z_ai/coding-helper not installed"
    fi

    if check_command tldr; then
        test_pass "llm-tldr installed"
    else
        test_warn "llm-tldr not installed"
    fi

    # Test npx access
    if npx @z_ai/mcp-server --help &>/dev/null; then
        test_pass "@z_ai/mcp-server accessible via npx"
    else
        test_warn "@z_ai/mcp-server not accessible via npx"
    fi

    echo ""
}

# Verify Ralph CLI (optional)
verify_ralph() {
    log_section "Verifying Ralph CLI (Optional)"

    if check_command ralph; then
        test_pass "Ralph CLI is installed"

        # Check hook
        HOOK_FILE="$HOME/.claude/hooks/ralph-auto-trigger.sh"
        if check_file "$HOOK_FILE"; then
            test_pass "Ralph auto-trigger hook exists"

            if [ -x "$HOOK_FILE" ]; then
                test_pass "Ralph hook is executable"
            else
                test_warn "Ralph hook exists but not executable"
            fi
        else
            test_warn "Ralph hook not found"
        fi

        # Check hooks.json
        if check_file "$HOME/.claude/hooks.json"; then
            if grep -q "ralph-auto-trigger" "$HOME/.claude/hooks.json" 2>/dev/null; then
                test_pass "hooks.json configured for Ralph"
            else
                test_warn "hooks.json exists but Ralph not configured"
            fi
        else
            test_warn "hooks.json not found"
        fi
    else
        log_info "Ralph CLI not installed (optional feature)"
    fi

    echo ""
}

# Verify settings files
verify_settings() {
    log_section "Verifying Settings Files"

    if check_file "$HOME/.claude/settings.json"; then
        test_pass "settings.json exists"

        # Check if valid JSON
        if python3 -m json.tool "$HOME/.claude/settings.json" &>/dev/null; then
            test_pass "settings.json is valid JSON"
        else
            test_fail "settings.json is not valid JSON"
        fi
    else
        test_warn "settings.json not found"
    fi

    if check_file "$HOME/.claude/settings.local.json"; then
        test_pass "settings.local.json exists"
    else
        test_warn "settings.local.json not found"
    fi

    echo ""
}

# Full verification
run_full_verification() {
    log_section "Running Full Verification Suite"

    verify_prerequisites
    verify_claude_code
    verify_settings
    verify_agents
    verify_mcp_tools
    verify_ralph

    # Summary
    log_section "Test Summary"
    echo -e "${GREEN}Passed: $TESTS_PASSED${NC}" | tee -a "$LOG_FILE"
    echo -e "${YELLOW}Warnings: $TESTS_WARNINGS${NC}" | tee -a "$LOG_FILE"
    echo -e "${RED}Failed: $TESTS_FAILED${NC}" | tee -a "$LOG_FILE"
    echo ""

    if [ $TESTS_FAILED -eq 0 ]; then
        log_success "All tests passed! ✅"
        return 0
    else
        log_error "Some tests failed! ❌"
        return 1
    fi
}

export -f log_info log_success log_error log_warning log_section
export -f test_pass test_fail test_warn save_results
export -f check_command check_file check_dir
export -f verify_prerequisites verify_claude_code verify_agents verify_mcp_tools verify_ralph verify_settings run_full_verification
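
# Example usage (illustrative sketch; the source path and test name below are
# placeholders for wherever this utilities file is saved):
#
#   source ./common.sh
#   run_full_verification
#   save_results "manual-install"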