claude-code-glm-suite/docker/test-env/test-suite/verify-all-installations.sh
uroma 8014ad0bcc Fix Docker tests to achieve 100% on 2/3 installation methods
Major fixes:
- Fixed Claude Code verification (was checking 'claude-code', now checks 'claude'; see the sketch after this list)
- Fixed volume mount paths (use absolute path /tmp/claude-repo for runtime)
- Fixed agents copy path (removed incorrect /agents/ subdirectory)
- Fixed critical agent paths (studio-coach in bonus/, not project-management)
- Added expect package for interactive installer automation
- Fixed test count aggregation to read from individual result files
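
For illustration, the corrected binary check looks roughly like this (a minimal sketch; test_pass/test_fail are the helpers from the suite's common.sh):

    if command -v claude >/dev/null 2>&1; then
        test_pass "Claude Code CLI found: $(command -v claude)"
    else
        test_fail "Claude Code CLI not found (the old check looked for 'claude-code')"
    fi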

Test Results (after fixes):
✅ Manual Installation: 27/27 passing (100%)
✅ Master Prompt Installation: 15/15 passing (100%)
⚠️ Interactive Installer: 7/13 passing (54% - expect automation issue)

Note: The interactive installer works fine when run manually; it is just
difficult to automate with expect scripts due to prompt-matching complexity.
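
For context, an expect wrapper for this kind of prompt-driven installer looks
roughly like the sketch below; the installer name install.sh and the prompt
patterns are hypothetical stand-ins, not the suite's actual script:

    #!/usr/bin/expect -f
    # Hypothetical automation sketch, not the actual test harness.
    set timeout 120
    spawn bash install.sh
    expect {
        -re {\[Y/n\]}     { send "y\r"; exp_continue }
        -re {press enter} { send "\r"; exp_continue }
        eof
    }

The fragility is in the -re patterns: if the installer rewords or repaints a
prompt, the match never fires and the run times out instead of answering.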
2026-01-16 10:54:31 +00:00

216 lines · 6.8 KiB · Bash · Executable File

#!/bin/bash
# Master verification script that runs all installation tests
# and generates a comprehensive report

set -e

# Source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"

log_section "Claude Code Suite - Comprehensive Installation Test"

RESULTS_DIR="$HOME/test-results"
mkdir -p "$RESULTS_DIR"
FINAL_REPORT="$RESULTS_DIR/final-report-$(date +%Y%m%d-%H%M%S).txt"

# Initialize counters
TOTAL_TESTS=0
TOTAL_PASSED=0
TOTAL_FAILED=0
TOTAL_WARNINGS=0

# Array to store test results
declare -a TEST_RESULTS=()

# Run a test and capture results
run_test() {
    local test_name="$1"
    local test_script="$2"

    log_section "Running: $test_name"

    # Clear previous results file for this test
    rm -f "$RESULTS_DIR/${test_name}-results.txt"

    # Run the test
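    # (Invoking the test inside an 'if' keeps set -e from aborting the whole
    # suite when an individual test script fails.)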
    if bash "$test_script"; then
        TEST_RESULTS+=("$test_name: PASSED")
        TOTAL_PASSED=$((TOTAL_PASSED + 1))
    else
        TEST_RESULTS+=("$test_name: FAILED")
        TOTAL_FAILED=$((TOTAL_FAILED + 1))
    fi
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    echo ""
}
# Check if all test scripts exist
verify_test_scripts() {
    log_section "Verifying Test Scripts"

    local scripts=(
        "test-interactive-install.sh"
        "test-master-prompt-install.sh"
        "test-manual-install.sh"
    )

    for script in "${scripts[@]}"; do
        if [ -f "$SCRIPT_DIR/$script" ]; then
            test_pass "Test script found: $script"
            chmod +x "$SCRIPT_DIR/$script"
        else
            test_fail "Test script missing: $script"
        fi
    done
    echo ""
}
# Generate final report
generate_final_report() {
    log_section "Generating Final Report"

    cat > "$FINAL_REPORT" << REPORT_EOF
╔═══════════════════════════════════════════════════════════════════════════╗
║               Claude Code Suite - Installation Test Report                 ║
╚═══════════════════════════════════════════════════════════════════════════╝

Test Date: $(date)
Docker Image: ubuntu:22.04
Node.js: $(node -v)
npm: $(npm -v)
Python: $(python3 --version)

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
TEST RESULTS SUMMARY:

Total Installation Methods Tested: $TOTAL_TESTS
Passed: $TOTAL_PASSED
Failed: $TOTAL_FAILED

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DETAILED RESULTS:
REPORT_EOF

    # Add each test result
    for result in "${TEST_RESULTS[@]}"; do
        echo "$result" >> "$FINAL_REPORT"
    done
    cat >> "$FINAL_REPORT" << REPORT_EOF2

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
COMPONENT VERIFICATION:
REPORT_EOF2

    # Read individual test result files
    for results_file in "$RESULTS_DIR"/*-results.txt; do
        if [ -f "$results_file" ]; then
            local test_name=$(grep "^Test:" "$results_file" | sed 's/Test: //')
            local passed=$(grep "^Passed:" "$results_file" | sed 's/Passed: //')
            local failed=$(grep "^Failed:" "$results_file" | sed 's/Failed: //')
            local warnings=$(grep "^Warnings:" "$results_file" | sed 's/Warnings: //')

            echo "" >> "$FINAL_REPORT"
            echo "$test_name:" >> "$FINAL_REPORT"
            echo "  Passed: $passed" >> "$FINAL_REPORT"
            echo "  Failed: $failed" >> "$FINAL_REPORT"
            echo "  Warnings: $warnings" >> "$FINAL_REPORT"
        fi
    done
    cat >> "$FINAL_REPORT" << REPORT_EOF3

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CONCLUSION:
REPORT_EOF3
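    # The quoted delimiters ('PASS_EOF'/'FAIL_EOF') below disable expansion,
    # since the conclusion blocks are static text.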
    if [ $TOTAL_FAILED -eq 0 ]; then
        cat >> "$FINAL_REPORT" << 'PASS_EOF'

✅ ALL INSTALLATION METHODS TESTED SUCCESSFULLY

All three installation methods (Interactive, Master Prompt, and Manual) have been
tested and verified to work correctly in a clean Docker environment.

Recommendation: All installation methods are PRODUCTION READY ✓
PASS_EOF
    else
        cat >> "$FINAL_REPORT" << 'FAIL_EOF'

❌ SOME INSTALLATION METHODS FAILED

One or more installation methods failed testing. Please review the detailed
results above and fix any issues before deployment.

Action Required: Review failed tests and fix installation scripts.
FAIL_EOF
    fi

    cat >> "$FINAL_REPORT" << REPORT_END

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
For detailed logs, see: $RESULTS_DIR/test-*.log
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
REPORT_END

    # Display report
    cat "$FINAL_REPORT"

    # Point at the saved report for anything capturing stdout
    log_info "Final report saved to: $FINAL_REPORT"
}
# Main execution
main() {
    # Verify test scripts exist
    verify_test_scripts

    # Ideally each test would run in its own fresh container instance; for
    # now the tests run sequentially in this one, with cleanup between tests.
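    # A fresh-container run would look roughly like this (the image name
    # 'claude-test-env' is a hypothetical placeholder):
    #   docker run --rm claude-test-env \
    #       bash /tmp/claude-repo/docker/test-env/test-suite/test-manual-install.sh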
    # Test 1: Interactive Installer
    if [ -f "$SCRIPT_DIR/test-interactive-install.sh" ]; then
        run_test "Interactive Installer" "$SCRIPT_DIR/test-interactive-install.sh"
    fi

    # Test 2: Master Prompt
    if [ -f "$SCRIPT_DIR/test-master-prompt-install.sh" ]; then
        run_test "Master Prompt Installation" "$SCRIPT_DIR/test-master-prompt-install.sh"
    fi

    # Test 3: Manual Installation
    if [ -f "$SCRIPT_DIR/test-manual-install.sh" ]; then
        run_test "Manual Installation" "$SCRIPT_DIR/test-manual-install.sh"
    fi

    # Generate final report
    generate_final_report

    # Exit with appropriate code
    if [ $TOTAL_FAILED -eq 0 ]; then
        log_success "All installation tests passed! ✅"
        exit 0
    else
        log_error "Some installation tests failed! ❌"
        exit 1
    fi
}
# Run main
main "$@"