#!/bin/bash
# Superball Protocol Test Suite - Main Orchestrator
# Tests SUP-01 through SUP-06 compliance

set -e

# Configuration
TEST_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
RESULTS_DIR="$TEST_DIR/results"
LOG_DIR="$TEST_DIR/logs"
FIXTURES_DIR="$TEST_DIR/fixtures"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test suite
TESTS=(
    "test_single_hop"
    "test_multi_hop"
    "test_padding"
    "test_delays"
    "test_relay_auth"
    "test_thrower_info"
    "test_end_to_end"
)

# Initialize test environment
init_test_env() {
    echo -e "${BLUE}=== Initializing Test Environment ===${NC}"

    mkdir -p "$RESULTS_DIR" "$LOG_DIR"

    # Check dependencies
    if ! command -v nak &> /dev/null; then
        echo -e "${RED}Error: 'nak' command not found. Please install nostr-army-knife${NC}"
        exit 1
    fi

    if [ ! -f "$TEST_DIR/../superball_thrower" ]; then
        echo -e "${RED}Error: superball_thrower binary not found. Please run 'make' first${NC}"
        exit 1
    fi

    echo -e "${GREEN}✓ Environment ready${NC}"
}

# Run all tests
run_all_tests() {
    echo -e "${BLUE}=== Superball Protocol Test Suite ===${NC}"
    echo "Starting at $(date)"
    echo ""

    init_test_env

    local passed=0
    local failed=0
    local skipped=0

    for test in "${TESTS[@]}"; do
        echo ""
        echo -e "${YELLOW}Running: $test${NC}"

        if [ ! -f "$TEST_DIR/${test}.sh" ]; then
            echo -e "${YELLOW}⊘ SKIPPED: $test (not implemented)${NC}"
            # Plain assignment instead of ((skipped++)): the post-increment
            # form evaluates to 0 on the first increment, which set -e
            # treats as a failure and would abort the whole suite
            skipped=$((skipped + 1))
            continue
        fi

        local start_time=$(date +%s)

        if bash "$TEST_DIR/${test}.sh" > "$LOG_DIR/${test}.log" 2>&1; then
            local end_time=$(date +%s)
            local duration=$((end_time - start_time))
            echo -e "${GREEN}✓ PASSED: $test (${duration}s)${NC}"
            passed=$((passed + 1))
        else
            local end_time=$(date +%s)
            local duration=$((end_time - start_time))
            echo -e "${RED}✗ FAILED: $test (${duration}s)${NC}"
            echo -e "${RED}  See log: $LOG_DIR/${test}.log${NC}"
            failed=$((failed + 1))

            # Show last 20 lines of failed test log
            echo -e "${RED}  Last 20 lines of log:${NC}"
            tail -n 20 "$LOG_DIR/${test}.log" | sed 's/^/    /'
        fi
    done

    echo ""
    echo -e "${BLUE}=== Test Summary ===${NC}"
    echo -e "${GREEN}Passed:  $passed${NC}"
    echo -e "${RED}Failed:  $failed${NC}"
    echo -e "${YELLOW}Skipped: $skipped${NC}"
    echo "Total: $((passed + failed + skipped))"
    echo ""
    echo "Completed at $(date)"

    # Generate results file
    cat > "$RESULTS_DIR/summary.txt" <<EOF
Superball Protocol Test Suite - Results
Date: $(date)
Passed: $passed, Failed: $failed, Skipped: $skipped

EOF

    for test in "${TESTS[@]}"; do
        if [ -f "$TEST_DIR/${test}.sh" ]; then
            # A log containing "PASS" marks the test as passing; this relies
            # on each test script printing its own status marker
            if grep -q "PASS" "$LOG_DIR/${test}.log" 2>/dev/null; then
                echo "✓ $test" >> "$RESULTS_DIR/summary.txt"
            else
                echo "✗ $test" >> "$RESULTS_DIR/summary.txt"
            fi
        else
            echo "⊘ $test (not implemented)" >> "$RESULTS_DIR/summary.txt"
        fi
    done

    return $failed
}
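# Each test_*.sh is an ordinary standalone script: it runs in its own bash
# process, its combined output is captured under $LOG_DIR, and exit status 0
# counts as a pass. A minimal sketch of one test follows (hypothetical: the
# real tests live in separate files, and everything here beyond the
# exit-status and PASS-marker contract is an assumption):
#
#   #!/bin/bash
#   # test_single_hop.sh - SUP-01: basic single-hop routing
#   set -e
#   echo "throwing single-hop ball..."
#   # ... drive superball_thrower here and verify delivery ...
#   echo "PASS: single-hop ball delivered"   # marker scanned for summary.txt
#   exit 0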
-f "$TEST_DIR/${test_name}.sh" ]; then echo -e "${RED}Error: Test '$test_name' not found${NC}" exit 1 fi init_test_env echo -e "${YELLOW}Running: $test_name${NC}" bash "$TEST_DIR/${test_name}.sh" } # List available tests list_tests() { echo -e "${BLUE}Available Tests:${NC}" for test in "${TESTS[@]}"; do if [ -f "$TEST_DIR/${test}.sh" ]; then echo -e " ${GREEN}✓${NC} $test" else echo -e " ${YELLOW}⊘${NC} $test (not implemented)" fi done } # Show help show_help() { cat < Run specific test list List available tests help Show this help message Examples: $0 # Run all tests $0 all # Run all tests $0 test_single_hop # Run single hop test $0 list # List available tests Test Suite: test_single_hop - SUP-01: Basic single-hop routing test_multi_hop - SUP-02: Multi-hop routing (2-5 hops) test_padding - SUP-03: Padding payload handling test_delays - SUP-04: Delay and jitter verification test_relay_auth - SUP-05: Relay authentication testing test_thrower_info - SUP-06: Thrower info publishing test_end_to_end - Complete workflow test Results: Logs: $LOG_DIR/ Results: $RESULTS_DIR/ EOF } # Main case "${1:-all}" in all) run_all_tests ;; list) list_tests ;; help|--help|-h) show_help ;; *) run_test "$1" ;; esac