#!/bin/bash
# run_test.sh - Run compression test for a specific SDK

set -e

SDK=$1
STATE=$2
EVENTS_COUNT=${3:-10} # Default to 10 events if not specified

if [ -z "$SDK" ] || [ -z "$STATE" ]; then
    echo "Usage: $0 <sdk> <state> [events_count]"
    echo "  sdk: go, typescript, or python"
    echo "  state: enabled or disabled"
    echo "  events_count: number of events to send (default: 10)"
    exit 1
fi
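
# Example invocation (illustrative values):
#   HATCHET_CLIENT_TOKEN='<your-token>' ./run_test.sh go enabled 100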

if [ "$STATE" != "enabled" ] && [ "$STATE" != "disabled" ]; then
    echo "Error: state must be 'enabled' or 'disabled'"
    exit 1
fi

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TEST_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
RESULTS_DIR="$TEST_DIR/results/$STATE"
CLIENT_CONTAINER="hatchet-client-${SDK}"

mkdir -p "$RESULTS_DIR"

echo "=========================================="
echo "Running ${SDK} SDK test (${STATE} compression)"
echo "Events: ${EVENTS_COUNT}"
echo "=========================================="

# Validate required environment variables
if [ -z "$HATCHET_CLIENT_TOKEN" ]; then
    echo "Error: HATCHET_CLIENT_TOKEN environment variable is required"
    echo "Usage: export HATCHET_CLIENT_TOKEN='your-token' && $0 <sdk> <state>"
    exit 1
fi

# Set default host port for macOS Docker (use IP to avoid IPv6 resolution issues)
if [ -z "$HATCHET_CLIENT_HOST_PORT" ]; then
    # Get the Docker gateway IP (host.docker.internal IPv4)
    GATEWAY_IP=$(docker run --rm alpine getent hosts host.docker.internal 2>/dev/null | awk '{print $1}' | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1)
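    # Illustrative: `getent hosts host.docker.internal` typically prints something like
    # "192.168.65.254      host.docker.internal"; the pipeline above takes the first field
    # and keeps it only if it is an IPv4 address.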
    if [ -z "$GATEWAY_IP" ]; then
        GATEWAY_IP="192.168.65.254" # Default Docker Desktop gateway
    fi
    export HATCHET_CLIENT_HOST_PORT="${GATEWAY_IP}:7070"
    echo "Using default HATCHET_CLIENT_HOST_PORT: $HATCHET_CLIENT_HOST_PORT"
fi
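
# If the autodetected gateway does not work for your setup, set the variable yourself
# before running, e.g. (illustrative): export HATCHET_CLIENT_HOST_PORT="127.0.0.1:7070"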

# Check if client image exists
IMAGE_NAME="${SDK}-${STATE}-compression"
if ! docker image inspect "$IMAGE_NAME" >/dev/null 2>&1; then
    echo "Error: Docker image '$IMAGE_NAME' not found"
    echo "Please build it first. See README.md for build instructions."
    exit 1
fi
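
# For example, running this script as "run_test.sh go enabled" expects an image tagged
# "go-enabled-compression" to have been built already (see README.md for build steps).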

# Clean up any existing container
docker rm -f "$CLIENT_CONTAINER" 2>/dev/null || true

# Start the client container in the background
echo "Starting ${SDK} client container..."
cd "$TEST_DIR"

# Export environment variables for docker-compose
export COMPRESSION_STATE="$STATE"
export HATCHET_CLIENT_TOKEN="${HATCHET_CLIENT_TOKEN}"
export HATCHET_CLIENT_HOST_PORT="${HATCHET_CLIENT_HOST_PORT}"
export HATCHET_CLIENT_SERVER_URL="${HATCHET_CLIENT_SERVER_URL:-http://localhost:8080}"
export HATCHET_CLIENT_API_URL="${HATCHET_CLIENT_API_URL:-${HATCHET_CLIENT_SERVER_URL:-http://localhost:8080}}"
export HATCHET_CLIENT_NAMESPACE="${HATCHET_CLIENT_NAMESPACE:-compression-test}"
export TEST_EVENTS_COUNT="${EVENTS_COUNT}"

# For the Go test, --events is the rate (events per second), not the total count.
# To get approximately EVENTS_COUNT events, we need: duration = EVENTS_COUNT / rate.
# We use a rate of 10 events per second.
# Note: due to timing, this may emit slightly more than EVENTS_COUNT events.
EVENTS_PER_SECOND=10
# Calculate duration: events_count / rate (rounded up to the nearest second)
DURATION_SECONDS=$((EVENTS_COUNT / EVENTS_PER_SECOND))
# Round up if there's a remainder
if [ $((EVENTS_COUNT % EVENTS_PER_SECOND)) -gt 0 ]; then
    DURATION_SECONDS=$((DURATION_SECONDS + 1))
fi
# Ensure a minimum of 1 second
if [ $DURATION_SECONDS -lt 1 ]; then
    DURATION_SECONDS=1
fi
export TEST_EVENTS_RATE="${EVENTS_PER_SECOND}"
export TEST_DURATION="${DURATION_SECONDS}s"
# Wait time should be the test duration plus a buffer for events to complete processing
# (events take time to execute)
export TEST_WAIT="$((DURATION_SECONDS + 5))s"
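# Example (illustrative): EVENTS_COUNT=25 gives DURATION_SECONDS=3 (25 / 10 rounded up),
# so TEST_DURATION=3s and TEST_WAIT=8s.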

# Run docker-compose with environment variables
docker-compose run -d \
    --name "$CLIENT_CONTAINER" \
    "client-${SDK}" > /dev/null 2>&1

# Wait a moment for container to start
sleep 2

# Calculate monitoring duration based on test duration
# For Go: use TEST_DURATION; for others: calculate from events count
if [ "$SDK" = "go" ]; then
    # Parse duration (e.g., "5s" -> 5)
    MONITOR_DURATION=$(echo "$TEST_DURATION" | sed 's/s$//')
    MONITOR_DURATION=$((MONITOR_DURATION + 10)) # Add buffer
else
    # For Python/TypeScript: events / 10 events per second + buffer
    EVENTS_PER_SECOND=10
    MONITOR_DURATION=$((EVENTS_COUNT / EVENTS_PER_SECOND + 15)) # Add 15 second buffer
fi
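# Example (illustrative): with EVENTS_COUNT=25, Go monitors for 3 + 10 = 13 seconds,
# while Python/TypeScript monitor for 25 / 10 + 15 = 17 seconds.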

# Start network monitoring
echo "Starting network monitoring..."
"$SCRIPT_DIR/monitor_network.sh" "$CLIENT_CONTAINER" "$MONITOR_DURATION" "$RESULTS_DIR/${SDK}_network.log" &
MONITOR_PID=$!

# Stream logs in real time (start from the last 10 lines so we don't replay a huge backlog)
echo "Streaming container logs (press Ctrl+C to stop streaming; the container will continue)..."
docker logs -f --tail 10 "$CLIENT_CONTAINER" 2>&1 | tee "$RESULTS_DIR/${SDK}_test.log" &
LOGS_PID=$!

# Wait for the container to complete (with a timeout)
echo "Waiting for test to complete..."
# Increase the timeout for TypeScript (it may take longer to process)
if [ "$SDK" = "typescript" ]; then
    TIMEOUT=180 # 3 minutes timeout for TypeScript
else
    TIMEOUT=120 # 2 minutes timeout for others
fi
ELAPSED=0
while [ $ELAPSED -lt $TIMEOUT ]; do
    if ! docker ps --format '{{.Names}}' | grep -q "^${CLIENT_CONTAINER}$"; then
        # Container stopped
        break
    fi
    sleep 1
    ELAPSED=$((ELAPSED + 1))
done

# If the container is still running after the timeout, stop it
if docker ps --format '{{.Names}}' | grep -q "^${CLIENT_CONTAINER}$"; then
    echo "Warning: Test timed out after ${TIMEOUT}s, stopping container..."
    docker stop "$CLIENT_CONTAINER" > /dev/null 2>&1 || true
    sleep 2 # Give it a moment to stop
fi

# Stop log streaming
kill $LOGS_PID 2>/dev/null || true
wait $LOGS_PID 2>/dev/null || true

# Wait for monitoring to complete - it needs to finish to write the summary file.
# The monitoring script runs for MONITOR_DURATION seconds, then writes the summary,
# so wait for it (MONITOR_DURATION plus a small buffer for file I/O).
MONITOR_TIMEOUT=$((MONITOR_DURATION + 5))
ELAPSED=0
while [ $ELAPSED -lt $MONITOR_TIMEOUT ]; do
    if ! kill -0 $MONITOR_PID 2>/dev/null; then
        # Monitoring script finished
        break
    fi
    sleep 1
    ELAPSED=$((ELAPSED + 1))
done

# If it is still running after the timeout, force-kill it
if kill -0 $MONITOR_PID 2>/dev/null; then
    kill -KILL $MONITOR_PID 2>/dev/null || true
fi
wait $MONITOR_PID 2>/dev/null || true

# Clean up container
docker rm -f "$CLIENT_CONTAINER" > /dev/null 2>&1 || true

# Helper function to format bytes in human-readable format
format_bytes() {
    local bytes=$1
    if [ $bytes -ge 1099511627776 ]; then
        awk "BEGIN {printf \"%.2f TB\", $bytes / 1099511627776}"
    elif [ $bytes -ge 1073741824 ]; then
        awk "BEGIN {printf \"%.2f GB\", $bytes / 1073741824}"
    elif [ $bytes -ge 1048576 ]; then
        awk "BEGIN {printf \"%.2f MB\", $bytes / 1048576}"
    elif [ $bytes -ge 1024 ]; then
        awk "BEGIN {printf \"%.2f KB\", $bytes / 1024}"
    else
        echo "${bytes} B"
    fi
}
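# Example (illustrative): format_bytes 1536 prints "1.50 KB"; format_bytes 1048576 prints "1.00 MB".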

# Extract network summary
if [ -f "$RESULTS_DIR/${SDK}_network.log.summary" ]; then
    source "$RESULTS_DIR/${SDK}_network.log.summary"
    RX_FORMATTED=$(format_bytes $RX_BYTES)
    TX_FORMATTED=$(format_bytes $TX_BYTES)
    TOTAL_FORMATTED=$(format_bytes $TOTAL_BYTES)
    echo ""
    echo "=== Test Results ==="
    echo "SDK: $SDK"
    echo "State: $STATE"
    echo "RX Bytes: $RX_FORMATTED ($RX_BYTES bytes)"
    echo "TX Bytes: $TX_FORMATTED ($TX_BYTES bytes)"
    echo "Total Bytes: $TOTAL_FORMATTED ($TOTAL_BYTES bytes)"
    echo ""
    echo "Results saved to: $RESULTS_DIR/${SDK}_network.log.summary"
else
    echo "Warning: Could not find network summary file"
fi

echo "Test complete for ${SDK} SDK (${STATE})"
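
# To gauge the effect of compression, run this script once per state and compare the summaries,
# e.g. (illustrative): cat results/enabled/go_network.log.summary results/disabled/go_network.log.summary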