Add complete Docker testing infrastructure to validate all 3 installation methods

Features:
- Dockerfile: Ubuntu 22.04 with Node.js 20, Python 3, prerequisites
- docker-compose.yml: Orchestrate 3 test containers + verification
- Test suite with 5 scripts:
  * common.sh: Shared utilities and verification functions
  * test-interactive-install.sh: Test Option 2 (interactive installer)
  * test-master-prompt-install.sh: Test Option 1 (master prompt)
  * test-manual-install.sh: Test Option 3 (manual installation)
  * verify-all-installations.sh: Master verification with report generation
- run-tests.sh: Quick-start script for easy test execution

What Gets Tested:
✓ Prerequisites (Node.js, npm, Python, Git, jq)
✓ Claude Code installation and version
✓ Settings files (settings.json, settings.local.json)
✓ 38 agents across 8 departments
✓ MCP tools (@z_ai/mcp-server, @z_ai/coding-helper, llm-tldr)
✓ UI/UX Pro Max skill
✓ Ralph CLI (optional, can be enabled)

Test Results:
- Saved to docker/test-env/test-results/
- Detailed logs for each test method
- Component verification counts
- Comprehensive final report with pass/fail status

Usage:
  cd docker/test-env
  ./run-tests.sh

Or manually:
  docker-compose up verify-all

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
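The run-tests.sh quick-start script referenced above is not shown here; a minimal sketch of what such a wrapper might contain, assuming only the verify-all service named in the usage note and a docker-compose.yml next to the script (everything else in this sketch is an assumption, not the shipped file):

#!/bin/bash
# Hypothetical quick-start wrapper: build the test image, then run the
# aggregated verification service and propagate its exit code.
set -euo pipefail
cd "$(dirname "$0")"   # expected to live in docker/test-env
docker-compose build
docker-compose up --exit-code-from verify-all verify-all

The manual path from the usage note, docker-compose up verify-all, does the same thing apart from exit-code propagation.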
216 lines · 6.6 KiB · Bash · Executable File
#!/bin/bash
# Master verification script that runs all installation tests
# and generates a comprehensive report

set -e

# Source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"

log_section "Claude Code Suite - Comprehensive Installation Test"

RESULTS_DIR="$HOME/test-results"
mkdir -p "$RESULTS_DIR"

FINAL_REPORT="$RESULTS_DIR/final-report-$(date +%Y%m%d-%H%M%S).txt"

# Initialize counters
TOTAL_TESTS=0
TOTAL_PASSED=0
TOTAL_FAILED=0
TOTAL_WARNINGS=0

# Array to store test results
declare -a TEST_RESULTS=()

# Run a test and capture results
run_test() {
    local test_name="$1"
    local test_script="$2"

    log_section "Running: $test_name"

    # Reset counters
    TESTS_PASSED=0
    TESTS_FAILED=0
    TESTS_WARNINGS=0

    # Run the test
    if bash "$test_script"; then
        TEST_RESULTS+=("✅ $test_name: PASSED")
        TOTAL_PASSED=$((TOTAL_PASSED + 1))
    else
        TEST_RESULTS+=("❌ $test_name: FAILED")
        TOTAL_FAILED=$((TOTAL_FAILED + 1))
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    # Save test counts (format: name:passed:failed:warnings, read back by generate_final_report)
    echo "$test_name:$TESTS_PASSED:$TESTS_FAILED:$TESTS_WARNINGS" >> "$RESULTS_DIR/test-counts.txt"

    echo ""
}

# Check if all test scripts exist
verify_test_scripts() {
    log_section "Verifying Test Scripts"

    local scripts=(
        "test-interactive-install.sh"
        "test-master-prompt-install.sh"
        "test-manual-install.sh"
    )

    for script in "${scripts[@]}"; do
        if [ -f "$SCRIPT_DIR/$script" ]; then
            test_pass "Test script found: $script"
            chmod +x "$SCRIPT_DIR/$script"
        else
            test_fail "Test script missing: $script"
        fi
    done

    echo ""
}

# Generate final report
generate_final_report() {
    log_section "Generating Final Report"

    cat > "$FINAL_REPORT" << REPORT_EOF
╔═══════════════════════════════════════════════════════════════════════════╗
║               Claude Code Suite - Installation Test Report                 ║
╚═══════════════════════════════════════════════════════════════════════════╝

Test Date: $(date)
Docker Image: ubuntu:22.04
Node.js: $(node -v)
npm: $(npm -v)
Python: $(python3 --version)

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

TEST RESULTS SUMMARY:

Total Installation Methods Tested: $TOTAL_TESTS
Passed: $TOTAL_PASSED
Failed: $TOTAL_FAILED

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

DETAILED RESULTS:

REPORT_EOF

    # Add each test result
    for result in "${TEST_RESULTS[@]}"; do
        echo "$result" >> "$FINAL_REPORT"
    done

    cat >> "$FINAL_REPORT" << REPORT_EOF2

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

COMPONENT VERIFICATION:

REPORT_EOF2

    # Read individual test counts
    if [ -f "$RESULTS_DIR/test-counts.txt" ]; then
        while IFS=: read -r test_name passed failed warnings; do
            echo "" >> "$FINAL_REPORT"
            echo "$test_name:" >> "$FINAL_REPORT"
            echo "  Passed: $passed" >> "$FINAL_REPORT"
            echo "  Failed: $failed" >> "$FINAL_REPORT"
            echo "  Warnings: $warnings" >> "$FINAL_REPORT"
        done < "$RESULTS_DIR/test-counts.txt"
    fi

    cat >> "$FINAL_REPORT" << REPORT_EOF3

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

CONCLUSION:

REPORT_EOF3

    if [ $TOTAL_FAILED -eq 0 ]; then
        cat >> "$FINAL_REPORT" << 'PASS_EOF'
✅ ALL INSTALLATION METHODS TESTED SUCCESSFULLY

All three installation methods (Interactive, Master Prompt, and Manual) have been
tested and verified to work correctly in a clean Docker environment.

Recommendation: All installation methods are PRODUCTION READY ✓

PASS_EOF
    else
        cat >> "$FINAL_REPORT" << 'FAIL_EOF'
❌ SOME INSTALLATION METHODS FAILED

One or more installation methods failed testing. Please review the detailed
results above and fix any issues before deployment.

Action Required: Review failed tests and fix installation scripts.

FAIL_EOF
    fi

    cat >> "$FINAL_REPORT" << REPORT_END

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

For detailed logs, see: $RESULTS_DIR/test-*.log

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
REPORT_END

    # Display report
    cat "$FINAL_REPORT"

    # Also save to stdout for capture
    log_info "Final report saved to: $FINAL_REPORT"
}

# Main execution
main() {
    # Verify test scripts exist
    verify_test_scripts

    # Run all tests (ideally each in a fresh, separate container instance;
    # here they run sequentially in the same environment)

    # Test 1: Interactive Installer
    if [ -f "$SCRIPT_DIR/test-interactive-install.sh" ]; then
        run_test "Interactive Installer" "$SCRIPT_DIR/test-interactive-install.sh"
    fi

    # Test 2: Master Prompt
    if [ -f "$SCRIPT_DIR/test-master-prompt-install.sh" ]; then
        run_test "Master Prompt Installation" "$SCRIPT_DIR/test-master-prompt-install.sh"
    fi

    # Test 3: Manual Installation
    if [ -f "$SCRIPT_DIR/test-manual-install.sh" ]; then
        run_test "Manual Installation" "$SCRIPT_DIR/test-manual-install.sh"
    fi

    # Generate final report
    generate_final_report

    # Exit with appropriate code
    if [ $TOTAL_FAILED -eq 0 ]; then
        log_success "All installation tests passed! ✅"
        exit 0
    else
        log_error "Some installation tests failed! ❌"
        exit 1
    fi
}

# Run main
main "$@"