feat: add runner conversion scripts and strengthen cutover automation

This commit is contained in:
S
2026-03-04 13:32:06 -06:00
parent e624885bb9
commit c2087d5087
43 changed files with 6995 additions and 42 deletions

View File

@@ -0,0 +1,12 @@
# .env.example — Shared configuration template for all runner containers.
#
# Copy this file to .env and fill in your values:
# cp .env.example .env
#
# This file is loaded by docker-compose.yml and applies to ALL runner services.
# Per-repo settings (name, labels, resources) go in envs/<repo>.env instead.
# GitHub Personal Access Token (classic or fine-grained with "repo" scope).
# Used to generate short-lived registration tokens for each runner container.
# Never stored in the runner agent — only used at container startup.
GITHUB_PAT=ghp_xxxxxxxxxxxxxxxxxxxx

6
runners-conversion/augur/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
# Ignore real env files (contain secrets like GITHUB_PAT).
# Only .env.example and envs/*.env.example are tracked.
.env
!.env.example
envs/*.env
!envs/*.env.example

View File

@@ -0,0 +1,93 @@
# Dockerfile — GitHub Actions self-hosted runner image.
#
# Includes: Ubuntu 24.04 + Go 1.26 + Node 24 + GitHub Actions runner agent.
# Designed for CI workloads on Linux x64 servers (e.g., Unraid, bare metal).
#
# Build:
# docker build -t augur-runner .
#
# The image is also auto-built and pushed to GHCR by the
# build-runner-image.yml workflow on Dockerfile or entrypoint changes.
FROM ubuntu:24.04
# --- Metadata labels (OCI standard) ---
LABEL org.opencontainers.image.title="augur-runner" \
      org.opencontainers.image.description="GitHub Actions self-hosted runner for augur CI" \
      org.opencontainers.image.source="https://github.com/AIinfusedS/augur" \
      org.opencontainers.image.licenses="Proprietary"
# --- Layer 1: System packages (changes least often) ---
# Combined into a single layer to minimize image size.
# --no-install-recommends avoids pulling unnecessary packages.
# tini is installed here and used as PID 1 in the ENTRYPOINT below.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    ca-certificates \
    curl \
    git \
    jq \
    python3 \
    tini \
    sudo \
    unzip \
    && rm -rf /var/lib/apt/lists/*
# --- Layer 2: Go 1.26 (pinned version + SHA256 verification) ---
# NOTE(review): GNU `sha256sum -c` expects TWO spaces (or " *") between
# hash and filename; confirm the echo below preserves that separator.
ARG GO_VERSION=1.26.0
ARG GO_SHA256=aac1b08a0fb0c4e0a7c1555beb7b59180b05dfc5a3d62e40e9de90cd42f88235
RUN curl -fsSL "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" -o /tmp/go.tar.gz && \
    echo "${GO_SHA256} /tmp/go.tar.gz" | sha256sum -c - && \
    tar -C /usr/local -xzf /tmp/go.tar.gz && \
    rm /tmp/go.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
# --- Layer 3: Node 24 LTS via NodeSource ---
# The NodeSource setup script runs its own `apt-get update`.
ARG NODE_MAJOR=24
RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_MAJOR}.x | bash - && \
    apt-get install -y --no-install-recommends nodejs && \
    rm -rf /var/lib/apt/lists/*
# --- Layer 4: Create non-root runner user (UID/GID 1000) ---
# Ubuntu 24.04 ships with an 'ubuntu' user at UID/GID 1000.
# Remove it first, then create our runner user at the same IDs.
RUN userdel -r ubuntu 2>/dev/null || true && \
    groupadd -f -g 1000 runner && \
    useradd -m -u 1000 -g runner -s /bin/bash runner && \
    echo "runner ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/runner
# --- Layer 5: GitHub Actions runner agent ---
# Downloads the latest runner release for linux-x64.
# The runner agent auto-updates itself between jobs, so pinning
# the exact version here is not critical.
# NOTE(review): the releases/latest API call is unauthenticated; frequent
# rebuilds may hit GitHub API rate limits — confirm acceptable.
ARG RUNNER_ARCH=x64
RUN RUNNER_VERSION=$(curl -fsSL https://api.github.com/repos/actions/runner/releases/latest \
    | jq -r '.tag_name' | sed 's/^v//') && \
    curl -fsSL "https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${RUNNER_ARCH}-${RUNNER_VERSION}.tar.gz" \
    -o /tmp/runner.tar.gz && \
    mkdir -p /home/runner/actions-runner && \
    tar -xzf /tmp/runner.tar.gz -C /home/runner/actions-runner && \
    rm /tmp/runner.tar.gz && \
    chown -R runner:runner /home/runner/actions-runner && \
    /home/runner/actions-runner/bin/installdependencies.sh
# --- Layer 6: Work directory (pre-create for Docker volume ownership) ---
# Docker named volumes inherit ownership from the mount point in the image.
# Creating _work as runner:runner ensures the volume is writable without sudo.
RUN mkdir -p /home/runner/_work && chown runner:runner /home/runner/_work
# --- Layer 7: Entrypoint (changes most often) ---
COPY --chown=runner:runner entrypoint.sh /home/runner/entrypoint.sh
RUN chmod +x /home/runner/entrypoint.sh
# --- Runtime configuration ---
USER runner
WORKDIR /home/runner/actions-runner
# Health check: verify the runner listener process is alive.
# start_period gives time for registration + first job pickup.
# NOTE(review): pgrep comes from procps; assumed present in the
# ubuntu:24.04 base image — confirm if the base is ever slimmed.
HEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=30s \
    CMD pgrep -f "Runner.Listener" > /dev/null || exit 1
# Use tini as PID 1 for proper signal forwarding and zombie reaping.
ENTRYPOINT ["tini", "--"]
CMD ["/home/runner/entrypoint.sh"]

View File

@@ -0,0 +1,416 @@
# Self-Hosted GitHub Actions Runner (Docker)
Run GitHub Actions CI on your own Linux server instead of GitHub-hosted runners.
Eliminates laptop CPU burden, avoids runner-minute quotas, and gives faster feedback.
## How It Works
Each runner container:
1. Starts up, generates a short-lived registration token from your GitHub PAT
2. Registers with GitHub in **ephemeral mode** (one job per lifecycle)
3. Picks up a CI job, executes it, and exits
4. Docker's `restart: unless-stopped` brings it back for the next job
## Prerequisites
- Docker Engine 24+ and Docker Compose v2
- A GitHub Personal Access Token (classic) with **`repo`** and **`read:packages`** scopes
- Network access to `github.com`, `api.github.com`, and `ghcr.io`
## One-Time GitHub Setup
Before deploying, the repository needs write permissions for the image build workflow.
### Enable GHCR image builds
The `build-runner-image.yml` workflow pushes Docker images to GHCR using the
`GITHUB_TOKEN`. By default, this token is read-only and the workflow will fail
silently (zero steps executed, no runner assigned).
Fix by allowing write permissions for Actions workflows:
```bash
gh api -X PUT repos/OWNER/REPO/actions/permissions/workflow \
-f default_workflow_permissions=write \
-F can_approve_pull_request_reviews=false
```
Alternatively, keep read-only defaults and create a dedicated PAT secret with
`write:packages` scope, then reference it in the workflow instead of `GITHUB_TOKEN`.
### Build the runner image
Trigger the GHCR image build (first time and whenever Dockerfile/entrypoint changes):
```bash
gh workflow run build-runner-image.yml
```
Wait for the workflow to complete (~5 min):
```bash
gh run list --workflow=build-runner-image.yml --limit=1
```
The image is also rebuilt automatically:
- On push to `main` when `infra/runners/Dockerfile` or `entrypoint.sh` changes
- Weekly (Monday 06:00 UTC) to pick up OS patches and runner agent updates
## Deploy on Your Server
### Choose an image source
| Method | Files needed on server | Registry auth? | Best for |
|--------|----------------------|----------------|----------|
| **Self-hosted registry** | `docker-compose.yml`, `.env`, `envs/augur.env` | No (your network) | Production — push once, pull from any machine |
| **GHCR** | `docker-compose.yml`, `.env`, `envs/augur.env` | Yes (`docker login ghcr.io`) | GitHub-native workflow |
| **Build locally** | All 5 files (+ `Dockerfile`, `entrypoint.sh`) | No | Quick start, no registry needed |
### Option A: Self-hosted registry (recommended)
For the full end-to-end workflow (build image on your Mac, push to Unraid registry,
start runner), see the [CI Workflow Guide](../../docs/ci-workflows.md#lifecycle-2-offload-ci-to-a-server-unraid).
The private Docker registry is configured at `infra/registry/`. It listens on port 5000,
accessible from the LAN. Docker treats `localhost` registries as insecure by default —
no `daemon.json` changes needed on the server. To push from another machine, add
`<UNRAID_IP>:5000` to `insecure-registries` in that machine's Docker daemon config.
### Option B: GHCR
Requires the `build-runner-image.yml` workflow to have run successfully
(see [One-Time GitHub Setup](#one-time-github-setup)).
```bash
# 1. Copy environment templates
cp .env.example .env
cp envs/augur.env.example envs/augur.env
# 2. Edit .env — set your GITHUB_PAT
# 3. Edit envs/augur.env — set REPO_URL, RUNNER_NAME, resource limits
# 4. Authenticate Docker with GHCR (one-time, persists to ~/.docker/config.json)
echo "$GITHUB_PAT" | docker login ghcr.io -u YOUR_GITHUB_USERNAME --password-stdin
# 5. Pull and start
docker compose pull
docker compose up -d
# 6. Verify runner is registered
docker compose ps
docker compose logs -f runner-augur
```
### Option C: Build locally
No registry needed — builds the image directly on the target machine.
Requires `Dockerfile` and `entrypoint.sh` alongside the compose file.
```bash
# 1. Copy environment templates
cp .env.example .env
cp envs/augur.env.example envs/augur.env
# 2. Edit .env — set your GITHUB_PAT
# 3. Edit envs/augur.env — set REPO_URL, RUNNER_NAME, resource limits
# 4. Build and start
docker compose up -d --build
# 5. Verify runner is registered
docker compose ps
docker compose logs -f runner-augur
```
### Verify the runner is online in GitHub
```bash
gh api repos/OWNER/REPO/actions/runners \
--jq '.runners[] | {name, status, labels: [.labels[].name]}'
```
## Activate Self-Hosted CI
Set the repository variable `CI_RUNS_ON` so the CI workflow targets your runner:
```bash
gh variable set CI_RUNS_ON --body '["self-hosted", "Linux", "X64"]'
```
To revert to GitHub-hosted runners:
```bash
gh variable delete CI_RUNS_ON
```
## Configuration
### Shared Config (`.env`)
| Variable | Required | Description |
|----------|----------|-------------|
| `GITHUB_PAT` | Yes | GitHub PAT with `repo` + `read:packages` scopes |
### Per-Repo Config (`envs/<repo>.env`)
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `REPO_URL` | Yes | — | Full GitHub repository URL |
| `RUNNER_NAME` | Yes | — | Unique runner name within the repo |
| `RUNNER_LABELS` | No | `self-hosted,Linux,X64` | Comma-separated runner labels |
| `RUNNER_GROUP` | No | `default` | Runner group |
| `RUNNER_IMAGE` | No | `ghcr.io/aiinfuseds/augur-runner:latest` | Docker image to use |
| `RUNNER_CPUS` | No | `6` | CPU limit for the container |
| `RUNNER_MEMORY` | No | `12G` | Memory limit for the container |
## Adding More Repos
1. Copy the per-repo env template:
```bash
cp envs/augur.env.example envs/myrepo.env
```
2. Edit `envs/myrepo.env` — set `REPO_URL`, `RUNNER_NAME`, and resource limits.
3. Add a service block to `docker-compose.yml`:
```yaml
runner-myrepo:
image: ${RUNNER_IMAGE:-ghcr.io/aiinfuseds/augur-runner:latest}
build: .
env_file:
- .env
- envs/myrepo.env
init: true
read_only: true
tmpfs:
- /tmp:size=2G
security_opt:
- no-new-privileges:true
stop_grace_period: 5m
deploy:
resources:
limits:
cpus: "${RUNNER_CPUS:-6}"
memory: "${RUNNER_MEMORY:-12G}"
restart: unless-stopped
healthcheck:
test: ["CMD", "pgrep", "-f", "Runner.Listener"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
logging:
driver: json-file
options:
max-size: "50m"
max-file: "3"
volumes:
- myrepo-work:/home/runner/_work
```
4. Add the volume at the bottom of `docker-compose.yml`:
```yaml
volumes:
augur-work:
myrepo-work:
```
5. Start: `docker compose up -d`
## Scaling
Run multiple concurrent runners for the same repo:
```bash
# Scale to 3 runners for augur
docker compose up -d --scale runner-augur=3
```
Each container gets a unique runner name (Docker appends a suffix).
Set `RUNNER_NAME` to a base name like `unraid-augur` — scaled instances become
`unraid-augur-1`, `unraid-augur-2`, etc.
## Resource Tuning
Each repo can have different resource limits in its env file:
```env
# Lightweight repo (linting only)
RUNNER_CPUS=2
RUNNER_MEMORY=4G
# Heavy repo (Go builds + extensive tests)
RUNNER_CPUS=8
RUNNER_MEMORY=16G
```
### tmpfs Sizing
The `/tmp` tmpfs defaults to 2G. If your CI writes large temp files,
increase it in `docker-compose.yml`:
```yaml
tmpfs:
- /tmp:size=4G
```
## Monitoring
```bash
# Container status and health
docker compose ps
# Live logs
docker compose logs -f runner-augur
# Last 50 log lines
docker compose logs --tail 50 runner-augur
# Resource usage
docker stats runner-augur
```
## Updating the Runner Image
To pull the latest GHCR image:
```bash
docker compose pull
docker compose up -d
```
To rebuild locally:
```bash
docker compose build
docker compose up -d
```
### Using a Self-Hosted Registry
See the [CI Workflow Guide](../../docs/ci-workflows.md#lifecycle-2-offload-ci-to-a-server-unraid)
for the full build-push-start workflow with a self-hosted registry.
## Troubleshooting
### Image build workflow fails with zero steps
The `build-runner-image.yml` workflow needs `packages: write` permission.
If the repo's default workflow permissions are read-only, the job fails
instantly (0 steps, no runner assigned). See [One-Time GitHub Setup](#one-time-github-setup).
### `docker compose pull` returns "access denied" or 403
The GHCR package inherits the repository's visibility. For private repos,
authenticate Docker first:
```bash
echo "$GITHUB_PAT" | docker login ghcr.io -u USERNAME --password-stdin
```
Or make the package public:
```bash
gh api -X PATCH /user/packages/container/augur-runner -f visibility=public
```
Or skip GHCR entirely and build locally: `docker compose build`.
### Runner doesn't appear in GitHub
1. Check logs: `docker compose logs runner-augur`
2. Verify `GITHUB_PAT` has `repo` scope
3. Verify `REPO_URL` is correct (full HTTPS URL)
4. Check network: `docker compose exec runner-augur curl -s https://api.github.com`
### Runner appears "offline"
The runner may have exited after a job. Check:
```bash
docker compose ps # Is the container running?
docker compose restart runner-augur # Force restart
```
### OOM (Out of Memory) kills
Increase `RUNNER_MEMORY` in the per-repo env file:
```env
RUNNER_MEMORY=16G
```
Then: `docker compose up -d`
### Stale/ghost runners in GitHub
Ephemeral runners deregister automatically after each job. If a container
was killed ungracefully (power loss, `docker kill`), the runner may appear
stale. It will auto-expire after a few hours, or remove manually:
```bash
# List runners
gh api repos/OWNER/REPO/actions/runners --jq '.runners[] | {id, name, status}'
# Remove stale runner by ID
gh api -X DELETE repos/OWNER/REPO/actions/runners/RUNNER_ID
```
### Disk space
Check work directory volume usage:
```bash
docker system df -v
```
Clean up unused volumes:
```bash
docker compose down -v # Remove work volumes
docker volume prune # Remove all unused volumes
```
## Unraid Notes
- **Docker login persistence**: `docker login ghcr.io` writes credentials to
`/root/.docker/config.json`. On Unraid, `/root` is on the USB flash drive
and persists across reboots. Verify with `cat /root/.docker/config.json`
after login.
- **Compose file location**: Place the 3 files (`docker-compose.yml`, `.env`,
`envs/augur.env`) in a share directory (e.g., `/mnt/user/appdata/augur-runner/`).
- **Alternative to GHCR**: If you don't want to deal with registry auth on Unraid,
copy the `Dockerfile` and `entrypoint.sh` alongside the compose file and use
`docker compose up -d --build` instead. No registry needed.
## Security
| Measure | Description |
|---------|-------------|
| Ephemeral mode | Fresh runner state per job — no cross-job contamination |
| PAT scope isolation | PAT generates a short-lived registration token; PAT never touches the runner agent |
| Non-root user | Runner process runs as UID 1000, not root |
| no-new-privileges | Prevents privilege escalation via setuid/setgid binaries |
| tini (PID 1) | Proper signal forwarding and zombie process reaping |
| Log rotation | Prevents disk exhaustion from verbose CI output (50MB x 3 files) |
### PAT Scope
Use the minimum scope required:
- **Classic token**: `repo` + `read:packages` scopes
- **Fine-grained token**: Repository access → Only select repositories → Read and Write for Administration
### Network Considerations
The runner container needs outbound access to:
- `github.com` (clone repos, download actions)
- `api.github.com` (registration, status)
- `ghcr.io` (pull runner image — only if using GHCR)
- Package registries (`proxy.golang.org`, `registry.npmjs.org`, etc.)
No inbound ports are required.
## Stopping and Removing
```bash
# Stop runners (waits for stop_grace_period)
docker compose down
# Stop and remove work volumes
docker compose down -v
# Stop, remove volumes, and delete the locally built image
docker compose down -v --rmi local
```

View File

@@ -0,0 +1,496 @@
#!/usr/bin/env bash
# actions-local.sh — Setup/start/stop local GitHub Actions runtime on macOS.
#
# This script prepares and manages local execution of workflows with `act`.
# Default runtime is Colima (free, local Docker daemon).
#
# Typical flow:
# 1) ./scripts/actions-local.sh --mode setup
# 2) ./scripts/actions-local.sh --mode start
# 3) act -W .github/workflows/ci-quality-gates.yml
# 4) ./scripts/actions-local.sh --mode stop
set -euo pipefail
# Option state populated by parse_args().
MODE=""
RUNTIME="auto"
# Tracks whether --runtime was given explicitly; do_stop uses this to
# decide whether a forced stop was requested.
RUNTIME_EXPLICIT=false
REFRESH_BREW=false
# Colima VM sizing — overridable via env vars or CLI flags.
COLIMA_PROFILE="${AUGUR_ACTIONS_COLIMA_PROFILE:-augur-actions}"
COLIMA_CPU="${AUGUR_ACTIONS_COLIMA_CPU:-4}"
COLIMA_MEMORY_GB="${AUGUR_ACTIONS_COLIMA_MEMORY_GB:-8}"
COLIMA_DISK_GB="${AUGUR_ACTIONS_COLIMA_DISK_GB:-60}"
WAIT_TIMEOUT_SEC="${AUGUR_ACTIONS_WAIT_TIMEOUT_SEC:-180}"
# State file records which runtime this script started, so `--mode stop`
# only tears down what this script created (see write_state/read_state).
STATE_DIR="${TMPDIR:-/tmp}"
STATE_FILE="${STATE_DIR%/}/augur-actions-local.state"
# Fields parsed from STATE_FILE by read_state().
STATE_RUNTIME=""
STATE_PROFILE=""
STATE_STARTED_BY_SCRIPT="0"
# usage — print help text to stdout. Quoted heredoc delimiter ('EOF')
# suppresses all expansion, so the text is emitted verbatim.
usage() {
  cat <<'EOF'
Usage:
  ./scripts/actions-local.sh --mode <setup|start|stop> [options]
Required:
  --mode MODE            One of: setup, start, stop
Options:
  --runtime RUNTIME      Runtime choice: auto, colima, docker-desktop (default: auto)
  --refresh-brew         In setup mode, force brew metadata refresh even if nothing is missing
  --colima-profile NAME  Colima profile name (default: augur-actions)
  --cpu N                Colima CPU count for start (default: 4)
  --memory-gb N          Colima memory (GB) for start (default: 8)
  --disk-gb N            Colima disk (GB) for start (default: 60)
  -h, --help             Show this help
Examples:
  ./scripts/actions-local.sh --mode setup
  ./scripts/actions-local.sh --mode start
  ./scripts/actions-local.sh --mode start --runtime colima --cpu 6 --memory-gb 12
  ./scripts/actions-local.sh --mode stop
  ./scripts/actions-local.sh --mode stop --runtime colima
Environment overrides:
  AUGUR_ACTIONS_COLIMA_PROFILE
  AUGUR_ACTIONS_COLIMA_CPU
  AUGUR_ACTIONS_COLIMA_MEMORY_GB
  AUGUR_ACTIONS_COLIMA_DISK_GB
  AUGUR_ACTIONS_WAIT_TIMEOUT_SEC
EOF
}
# Logging helpers. Informational messages go to stdout; warnings and
# fatal errors go to stderr. All share the "[actions-local]" prefix.
log() {
  printf '%s %s\n' '[actions-local]' "$*"
}
warn() {
  printf '%s WARNING: %s\n' '[actions-local]' "$*" >&2
}
# die — print an error to stderr and abort the script.
die() {
  printf '%s ERROR: %s\n' '[actions-local]' "$*" >&2
  exit 1
}
# require_cmd — abort unless the named command is resolvable on PATH.
require_cmd() {
  local needed="$1"
  if ! command -v "$needed" >/dev/null 2>&1; then
    die "required command not found: $needed"
  fi
}
# ensure_macos — abort unless running on Darwin (checked via `uname -s`).
# The setup/start/stop flows all drive macOS-only tooling.
ensure_macos() {
  local os
  os="$(uname -s)"
  [[ "$os" == "Darwin" ]] || die "This script currently supports macOS only."
}
# parse_args — populate MODE/RUNTIME/COLIMA_*/REFRESH_BREW globals from argv.
# Value-taking flags are funneled through one arm so the "requires a value"
# check is written once; final validation enforces the allowed value sets.
parse_args() {
  while [[ $# -gt 0 ]]; do
    local opt="$1"
    case "$opt" in
      --mode|--runtime|--colima-profile|--cpu|--memory-gb|--disk-gb)
        shift
        [[ $# -gt 0 ]] || die "$opt requires a value"
        case "$opt" in
          --mode) MODE="$1" ;;
          --runtime)
            RUNTIME="$1"
            RUNTIME_EXPLICIT=true
            ;;
          --colima-profile) COLIMA_PROFILE="$1" ;;
          --cpu) COLIMA_CPU="$1" ;;
          --memory-gb) COLIMA_MEMORY_GB="$1" ;;
          --disk-gb) COLIMA_DISK_GB="$1" ;;
        esac
        shift
        ;;
      --refresh-brew)
        REFRESH_BREW=true
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "unknown argument: $1"
        ;;
    esac
  done
  # Post-parse validation: --mode is mandatory and both enums are closed.
  [[ -n "$MODE" ]] || die "--mode is required (setup|start|stop)"
  case "$MODE" in
    setup|start|stop) ;;
    *) die "invalid --mode: $MODE (expected setup|start|stop)" ;;
  esac
  case "$RUNTIME" in
    auto|colima|docker-desktop) ;;
    *) die "invalid --runtime: $RUNTIME (expected auto|colima|docker-desktop)" ;;
  esac
}
# ensure_command_line_tools — install Xcode Command Line Tools if absent.
# Tries a headless install via softwareupdate; falls back to the GUI
# installer (and aborts) when the package label cannot be detected.
ensure_command_line_tools() {
  if xcode-select -p >/dev/null 2>&1; then
    log "Xcode Command Line Tools already installed."
    return
  fi
  log "Xcode Command Line Tools missing; attempting automated install..."
  # Touching this marker makes `softwareupdate -l` advertise the CLT
  # packages (Apple's on-demand install hook) — presumably still honored
  # on current macOS; confirm on new OS releases.
  local marker="/tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress"
  local label=""
  touch "$marker"
  # Pick the newest "Command Line Tools" label from the update catalog.
  label="$(softwareupdate -l 2>/dev/null | sed -n 's/^\* Label: //p' | grep 'Command Line Tools' | tail -n1 || true)"
  rm -f "$marker"
  if [[ -n "$label" ]]; then
    sudo softwareupdate -i "$label" --verbose
    sudo xcode-select --switch /Library/Developer/CommandLineTools
  else
    warn "Could not auto-detect Command Line Tools package; launching GUI installer."
    xcode-select --install || true
    # The GUI install is asynchronous, so we cannot continue in this run.
    die "Finish installing Command Line Tools, then re-run setup."
  fi
  # Final sanity check that the automated path actually worked.
  xcode-select -p >/dev/null 2>&1 || die "Command Line Tools installation did not complete."
  log "Xcode Command Line Tools installed."
}
# ensure_homebrew — install Homebrew if missing, then load its shellenv
# so `brew` is on PATH for the rest of this process. Handles both the
# Apple Silicon (/opt/homebrew) and Intel (/usr/local) install prefixes.
ensure_homebrew() {
  if command -v brew >/dev/null 2>&1; then
    log "Homebrew already installed."
  else
    require_cmd curl
    log "Installing Homebrew..."
    # NONINTERACTIVE=1 suppresses the installer's confirmation prompt.
    NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
  fi
  # A fresh install is not yet on PATH; eval shellenv from a known prefix.
  if [[ -x /opt/homebrew/bin/brew ]]; then
    eval "$(/opt/homebrew/bin/brew shellenv)"
  elif [[ -x /usr/local/bin/brew ]]; then
    eval "$(/usr/local/bin/brew shellenv)"
  elif command -v brew >/dev/null 2>&1; then
    eval "$("$(command -v brew)" shellenv)"
  else
    die "Homebrew not found after installation."
  fi
  log "Homebrew ready: $(brew --version | head -n1)"
}
# install_brew_formula_if_missing — idempotently install one Homebrew
# formula; logs and skips if it is already present.
install_brew_formula_if_missing() {
  local pkg="$1"
  if brew list --versions "$pkg" >/dev/null 2>&1; then
    log "Already installed: $pkg"
    return
  fi
  log "Installing: $pkg"
  brew install "$pkg"
}
# list_missing_formulas — print to stdout, one per line, each argument
# that is not currently installed via Homebrew. Prints nothing when all
# formulas are present.
list_missing_formulas() {
  local candidate
  for candidate in "$@"; do
    if ! brew list --versions "$candidate" >/dev/null 2>&1; then
      printf '%s\n' "$candidate"
    fi
  done
}
# colima_context_name — map a Colima profile name to its Docker context
# name: the default profile is just "colima", others are "colima-<name>".
colima_context_name() {
  case "$1" in
    default) printf 'colima' ;;
    *) printf 'colima-%s' "$1" ;;
  esac
}
# colima_is_running — return 0 iff the configured Colima profile reports
# a running state. Matches on colima's human-readable status text, so
# "not running" must be checked before the broader "running" substring.
# NOTE(review): depends on colima's exact status wording — confirm across
# colima versions.
colima_is_running() {
  local out
  # `colima status` may write to stderr and exit non-zero when stopped;
  # capture both and never let the failure propagate under set -e.
  out="$(colima status --profile "$COLIMA_PROFILE" 2>&1 || true)"
  if printf '%s' "$out" | grep -qi "not running"; then
    return 1
  fi
  if printf '%s' "$out" | grep -qi "running"; then
    return 0
  fi
  return 1
}
# docker_ready — return 0 iff a Docker daemon answers `docker info`.
docker_ready() {
  docker info >/dev/null 2>&1
}
# wait_for_docker — poll every 2s until the daemon responds, aborting
# after WAIT_TIMEOUT_SEC seconds.
wait_for_docker() {
  local elapsed=0
  until docker_ready; do
    if (( elapsed >= WAIT_TIMEOUT_SEC )); then
      die "Docker daemon not ready after ${WAIT_TIMEOUT_SEC}s."
    fi
    sleep 2
    elapsed=$((elapsed + 2))
  done
}
# write_state — persist which runtime was selected, the Colima profile in
# use, and whether this script actually started the runtime. The file is
# simple key=value lines so read_state can parse it without sourcing.
write_state() {
  local rt="$1"
  local started="$2"
  {
    printf 'runtime=%s\n' "$rt"
    printf 'profile=%s\n' "$COLIMA_PROFILE"
    printf 'started_by_script=%s\n' "$started"
    printf 'timestamp=%s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
  } > "$STATE_FILE"
}
# read_state — load STATE_RUNTIME / STATE_PROFILE / STATE_STARTED_BY_SCRIPT
# from STATE_FILE. Resets all three to defaults first; silently a no-op
# when no state file exists (e.g. first run).
read_state() {
  STATE_RUNTIME=""
  STATE_PROFILE=""
  STATE_STARTED_BY_SCRIPT="0"
  [[ -f "$STATE_FILE" ]] || return 0
  local k v
  while IFS='=' read -r k v; do
    if [[ "$k" == "runtime" ]]; then
      STATE_RUNTIME="$v"
    elif [[ "$k" == "profile" ]]; then
      STATE_PROFILE="$v"
    elif [[ "$k" == "started_by_script" ]]; then
      STATE_STARTED_BY_SCRIPT="$v"
    fi
  done < "$STATE_FILE"
}
# resolve_runtime_auto — pick a runtime by availability: Colima wins if
# installed, else Docker Desktop (app bundle or docker CLI present).
# Prints the choice; dies when neither is found.
resolve_runtime_auto() {
  if command -v colima >/dev/null 2>&1; then
    printf 'colima'
    return
  fi
  if [[ -d "/Applications/Docker.app" ]] || command -v docker >/dev/null 2>&1; then
    printf 'docker-desktop'
    return
  fi
  die "No supported runtime found. Run setup first."
}
# start_colima_runtime — ensure the Colima VM is up (starting it with the
# configured cpu/memory/disk if needed), switch the Docker CLI context to
# the profile's context, wait for the daemon, and record state for stop.
start_colima_runtime() {
  require_cmd colima
  require_cmd docker
  require_cmd act
  # "1" only when THIS invocation started the VM — recorded in the state
  # file so `--mode stop` won't tear down a VM it didn't start.
  local started="0"
  if colima_is_running; then
    log "Colima profile '${COLIMA_PROFILE}' is already running."
  else
    log "Starting Colima profile '${COLIMA_PROFILE}' (cpu=${COLIMA_CPU}, memory=${COLIMA_MEMORY_GB}GB, disk=${COLIMA_DISK_GB}GB)..."
    colima start --profile "$COLIMA_PROFILE" --cpu "$COLIMA_CPU" --memory "$COLIMA_MEMORY_GB" --disk "$COLIMA_DISK_GB"
    started="1"
  fi
  local context
  context="$(colima_context_name "$COLIMA_PROFILE")"
  # Only switch context if it exists; a failed switch is non-fatal.
  if docker context ls --format '{{.Name}}' | grep -Fxq "$context"; then
    docker context use "$context" >/dev/null 2>&1 || true
  fi
  wait_for_docker
  write_state "colima" "$started"
  log "Runtime ready (colima)."
  log "Try: act -W .github/workflows/ci-quality-gates.yml"
}
# start_docker_desktop_runtime — launch the Docker Desktop app if the
# daemon isn't already answering, wait for readiness, and record state.
start_docker_desktop_runtime() {
  require_cmd docker
  require_cmd act
  require_cmd open
  local started="0"
  if docker_ready; then
    log "Docker daemon already running."
  else
    log "Starting Docker Desktop..."
    # -g: don't bring the app to the foreground; -a: open by app name.
    open -ga Docker
    started="1"
  fi
  wait_for_docker
  write_state "docker-desktop" "$started"
  log "Runtime ready (docker-desktop)."
  log "Try: act -W .github/workflows/ci-quality-gates.yml"
}
# stop_colima_runtime — stop the profile's VM if it is running.
stop_colima_runtime() {
  require_cmd colima
  if colima_is_running; then
    log "Stopping Colima profile '${COLIMA_PROFILE}'..."
    colima stop --profile "$COLIMA_PROFILE"
  else
    log "Colima profile '${COLIMA_PROFILE}' is already stopped."
  fi
}
# stop_docker_desktop_runtime — quit the Docker Desktop app via
# AppleScript; best-effort (errors ignored, e.g. app not running).
stop_docker_desktop_runtime() {
  require_cmd osascript
  log "Stopping Docker Desktop..."
  osascript -e 'quit app "Docker"' >/dev/null 2>&1 || true
}
# do_setup — install everything needed to run workflows locally:
# Command Line Tools, Homebrew, then git/act/colima/docker formulas.
# Skips `brew update` entirely when nothing is missing (fast path),
# unless --refresh-brew forces a metadata refresh.
do_setup() {
  ensure_macos
  ensure_command_line_tools
  ensure_homebrew
  local required_formulas=(git act colima docker)
  local missing_formulas=()
  local missing_formula
  # Collect missing formulas line-by-line; `|| true` guards set -e if the
  # helper's last probe returns non-zero.
  while IFS= read -r missing_formula; do
    [[ -n "$missing_formula" ]] || continue
    missing_formulas+=("$missing_formula")
  done < <(list_missing_formulas "${required_formulas[@]}" || true)
  if [[ "${#missing_formulas[@]}" -eq 0 ]]; then
    log "All required formulas already installed: ${required_formulas[*]}"
    if [[ "$REFRESH_BREW" == "true" ]]; then
      log "Refreshing Homebrew metadata (--refresh-brew)..."
      brew update
    else
      log "Skipping brew update; nothing to install."
    fi
    log "Setup complete (no changes required)."
    log "Next: ./scripts/actions-local.sh --mode start"
    return
  fi
  log "Missing formulas detected: ${missing_formulas[*]}"
  log "Updating Homebrew metadata..."
  brew update
  local formula
  # Iterate the full required list; the helper skips already-installed ones.
  for formula in "${required_formulas[@]}"; do
    install_brew_formula_if_missing "$formula"
  done
  log "Setup complete."
  log "Next: ./scripts/actions-local.sh --mode start"
}
# do_start — resolve the runtime (honoring --runtime auto detection) and
# delegate to the matching start routine.
do_start() {
  ensure_macos
  local choice="$RUNTIME"
  if [[ "$choice" == "auto" ]]; then
    choice="$(resolve_runtime_auto)"
  fi
  case "$choice" in
    colima) start_colima_runtime ;;
    docker-desktop) start_docker_desktop_runtime ;;
    *) die "unsupported runtime: $choice" ;;
  esac
}
# do_stop — stop the runtime, with guard rails so we don't tear down a
# daemon the user started themselves. Precedence:
#   1. An explicit --runtime always forces a stop of that runtime.
#   2. Otherwise the state file decides which runtime/profile to stop,
#      and only if this script started it (started_by_script=1).
#   3. With no state file, detect an installed runtime but refuse to stop
#      it unless --runtime is passed.
do_stop() {
  ensure_macos
  read_state
  local selected_runtime="$RUNTIME"
  local should_stop="1"
  if [[ "$selected_runtime" == "auto" ]]; then
    if [[ -n "$STATE_RUNTIME" ]]; then
      # State file present: stop what we recorded, under its profile.
      selected_runtime="$STATE_RUNTIME"
      if [[ -n "$STATE_PROFILE" ]]; then
        COLIMA_PROFILE="$STATE_PROFILE"
      fi
      # Runtime was already running before we "started" it — leave it be.
      if [[ "$STATE_STARTED_BY_SCRIPT" != "1" ]]; then
        should_stop="0"
      fi
    else
      # No state file: detect what exists, but default to not stopping it.
      if command -v colima >/dev/null 2>&1; then
        selected_runtime="colima"
      elif [[ -d "/Applications/Docker.app" ]] || command -v docker >/dev/null 2>&1; then
        selected_runtime="docker-desktop"
      else
        log "No local Actions runtime is installed or tracked. Nothing to stop."
        return
      fi
      should_stop="0"
    fi
  fi
  # RUNTIME_EXPLICIT overrides the "we didn't start it" guard.
  if [[ "$should_stop" != "1" && "$RUNTIME_EXPLICIT" != "true" ]]; then
    log "No runtime started by this script is currently tracked. Nothing to stop."
    log "Pass --runtime colima or --runtime docker-desktop to force a stop."
    return
  fi
  case "$selected_runtime" in
    colima)
      stop_colima_runtime
      ;;
    docker-desktop)
      stop_docker_desktop_runtime
      ;;
    *)
      die "unsupported runtime: $selected_runtime"
      ;;
  esac
  # Clear state so a later bare `--mode stop` doesn't act on stale info.
  if [[ -f "$STATE_FILE" ]]; then
    rm -f "$STATE_FILE"
  fi
  log "Stop complete."
}
# main — parse arguments, then dispatch on the validated MODE.
main() {
  parse_args "$@"
  if [[ "$MODE" == "setup" ]]; then
    do_setup
  elif [[ "$MODE" == "start" ]]; then
    do_start
  elif [[ "$MODE" == "stop" ]]; then
    do_stop
  else
    die "unexpected mode: $MODE"
  fi
}
main "$@"

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env bash
# check-browser-parity.sh — Verify Chrome/Firefox extension parity for all providers.
#
# Compares all source files between Firefox (-exporter-extension) and Chrome
# (-exporter-chrome) variants. The only allowed difference is the
# browser_specific_settings.gecko block in manifest.json.
#
# Usage: scripts/check-browser-parity.sh
set -euo pipefail

# Resolve the repository root relative to this script's own location.
script_dir="$(dirname "${BASH_SOURCE[0]}")"
REPO_ROOT="$(cd "$script_dir/.." && pwd)"
EXT_ROOT="$REPO_ROOT/browser-extensions/history-extensions"

# Providers whose Firefox/Chrome extension pairs must stay in parity.
PROVIDERS=(gemini copilot deepseek grok perplexity poe)

# Source files required to be byte-identical between the two variants.
PARITY_FILES=(
  src/content/content.js
  src/lib/export.js
  src/lib/popup-core.js
  src/lib/popup-utils.js
  src/popup/popup.js
  src/popup/popup.html
  src/popup/popup.css
  src/popup/permissions.html
)

# Logging helpers: info to stdout, failures to stderr.
log() {
  printf '%s %s\n' '[parity]' "$*"
}
err() {
  printf '%s FAIL: %s\n' '[parity]' "$*" >&2
}
failures=0   # failed checks across all providers
checks=0     # total checks attempted
for provider in "${PROVIDERS[@]}"; do
  firefox_dir="$EXT_ROOT/${provider}-exporter-extension"
  chrome_dir="$EXT_ROOT/${provider}-exporter-chrome"
  # A missing variant directory fails the provider outright.
  # NOTE(review): directory-missing failures bump `failures` but not
  # `checks`, so the final "passed" count can undercount — confirm intent.
  if [[ ! -d "$firefox_dir" ]]; then
    err "$provider — Firefox directory missing: $firefox_dir"
    failures=$((failures + 1))
    continue
  fi
  if [[ ! -d "$chrome_dir" ]]; then
    err "$provider — Chrome directory missing: $chrome_dir"
    failures=$((failures + 1))
    continue
  fi
  # Every parity file must exist in both variants and be byte-identical.
  for file in "${PARITY_FILES[@]}"; do
    checks=$((checks + 1))
    ff_path="$firefox_dir/$file"
    cr_path="$chrome_dir/$file"
    if [[ ! -f "$ff_path" ]]; then
      err "$provider — Firefox missing: $file"
      failures=$((failures + 1))
      continue
    fi
    if [[ ! -f "$cr_path" ]]; then
      err "$provider — Chrome missing: $file"
      failures=$((failures + 1))
      continue
    fi
    if ! diff -q "$ff_path" "$cr_path" >/dev/null 2>&1; then
      # FIX: message previously concatenated "$provider$file" with no
      # separator, unlike every sibling message.
      err "$provider — $file differs between Firefox and Chrome"
      failures=$((failures + 1))
    fi
  done
  # Validate manifest.json: only browser_specific_settings.gecko should differ.
  checks=$((checks + 1))
  ff_manifest="$firefox_dir/manifest.json"
  cr_manifest="$chrome_dir/manifest.json"
  if [[ ! -f "$ff_manifest" || ! -f "$cr_manifest" ]]; then
    err "$provider — manifest.json missing from one or both variants"
    failures=$((failures + 1))
    continue
  fi
  # Strip the browser_specific_settings block from the Firefox manifest and
  # normalize trailing commas so the remaining JSON matches Chrome's.
  # NOTE(review): this sed relies on the block ending at a line matching
  # "^ }" — fragile against reformatting; confirm manifests stay 2-space
  # indented, or switch to jq 'del(.browser_specific_settings)'.
  ff_stripped=$(sed '/"browser_specific_settings"/,/^ }/d' "$ff_manifest" | sed '/^$/d' | sed 's/,$//')
  cr_stripped=$(sed '/^$/d' "$cr_manifest" | sed 's/,$//')
  if ! diff -q <(echo "$ff_stripped") <(echo "$cr_stripped") >/dev/null 2>&1; then
    err "$provider — manifest.json has unexpected differences beyond browser_specific_settings"
    failures=$((failures + 1))
  fi
done
echo ""
passed=$((checks - failures))
log "Results: ${passed} passed, ${failures} failed (${checks} checks across ${#PROVIDERS[@]} providers)"
if [[ "$failures" -gt 0 ]]; then
  err "Browser parity check failed."
  exit 1
fi
log "All browser parity checks passed."
exit 0

View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# check-contract-drift.sh — Enforce Constitution Principle V (contracts stay in lock-step).
#
# Fails when boundary-signature changes are detected under internal layers without
# any update under contracts/*.md in the same diff range.
set -euo pipefail

# Run from the repository root so git paths in the diff are stable.
script_dir="$(dirname "${BASH_SOURCE[0]}")"
REPO_ROOT="$(cd "$script_dir/.." && pwd)"
cd "$REPO_ROOT"

# Logging helpers: info to stdout, errors to stderr.
log() {
  printf '%s %s\n' '[contract-drift]' "$*"
}
err() {
  printf '%s ERROR: %s\n' '[contract-drift]' "$*" >&2
}
# resolve_range — print the git diff range to inspect, trying in order:
#   1. AUGUR_CONTRACT_DRIFT_RANGE (explicit override),
#   2. PR base branch (GITHUB_BASE_REF, fetched shallowly),
#   3. push event before/after SHAs (skipping the all-zeros "new branch" SHA),
#   4. HEAD~1...HEAD as a last resort.
# Prints the empty string when no range can be determined.
# NOTE(review): GITHUB_EVENT_BEFORE is not a built-in Actions variable —
# presumably the workflow exports it from github.event.before; confirm.
resolve_range() {
  if [[ -n "${AUGUR_CONTRACT_DRIFT_RANGE:-}" ]]; then
    printf '%s' "$AUGUR_CONTRACT_DRIFT_RANGE"
    return 0
  fi
  if [[ -n "${GITHUB_BASE_REF:-}" ]]; then
    # Best-effort fetch so origin/<base> exists in shallow CI checkouts.
    git fetch --no-tags --depth=1 origin "$GITHUB_BASE_REF" >/dev/null 2>&1 || true
    printf 'origin/%s...HEAD' "$GITHUB_BASE_REF"
    return 0
  fi
  if [[ -n "${GITHUB_EVENT_BEFORE:-}" ]] && [[ -n "${GITHUB_SHA:-}" ]] && [[ "$GITHUB_EVENT_BEFORE" != "0000000000000000000000000000000000000000" ]]; then
    printf '%s...%s' "$GITHUB_EVENT_BEFORE" "$GITHUB_SHA"
    return 0
  fi
  # Fallback for local runs: compare against the previous commit if any.
  if git rev-parse --verify HEAD~1 >/dev/null 2>&1; then
    printf 'HEAD~1...HEAD'
    return 0
  fi
  printf ''
}
# When set to 1, inspect uncommitted working-tree changes instead of a
# commit range (useful for pre-commit style local runs).
USE_WORKTREE="${AUGUR_CONTRACT_DRIFT_USE_WORKTREE:-0}"
RANGE=""
if [[ "$USE_WORKTREE" == "1" ]]; then
  log "Diff source: working tree (HEAD -> working tree)"
  changed_files="$(git diff --name-only)"
else
  RANGE="$(resolve_range)"
  if [[ -z "$RANGE" ]]; then
    # Cannot determine what changed (e.g. single-commit shallow clone);
    # fail open rather than block CI.
    log "No diff range could be resolved; skipping contract drift check."
    exit 0
  fi
  log "Diff range: $RANGE"
  changed_files="$(git diff --name-only "$RANGE")"
fi
if [[ -z "$changed_files" ]]; then
  log "No changed files in range; skipping."
  exit 0
fi
# Any contracts/*.md update in the same range satisfies the principle —
# no further inspection needed.
if printf '%s\n' "$changed_files" | grep -Eq '^contracts/.*\.md$'; then
  log "Contract files changed in range; check passed."
  exit 0
fi
# Boundary-sensitive files that define cross-layer contracts.
boundary_files="$(printf '%s\n' "$changed_files" | grep -E '^internal/(cli|service|provider|storage|sync|model)/.*\.go$' || true)"
if [[ -z "$boundary_files" ]]; then
  log "No boundary-sensitive Go files changed; check passed."
  exit 0
fi
violations=()
while IFS= read -r file; do
  [[ -z "$file" ]] && continue
  # Canonical model and provider interface are always contract-relevant.
  if [[ "$file" == "internal/model/conversation.go" ]] || [[ "$file" == "internal/provider/provider.go" ]]; then
    violations+=("$file")
    continue
  fi
  # Heuristic: exported symbol signature/shape changes in boundary layers are contract-relevant.
  # Matches exported funcs, exported interfaces, and exported struct fields with JSON tags.
  # -U0 keeps the diff to changed lines only, so +/- prefixed matches are
  # actual additions/removals rather than context.
  diff_output=""
  if [[ "$USE_WORKTREE" == "1" ]]; then
    diff_output="$(git diff -U0 -- "$file")"
  else
    diff_output="$(git diff -U0 "$RANGE" -- "$file")"
  fi
  if printf '%s\n' "$diff_output" | grep -Eq '^[+-](func (\([^)]*\) )?[A-Z][A-Za-z0-9_]*\(|type [A-Z][A-Za-z0-9_]* interface|[[:space:]]+[A-Z][A-Za-z0-9_]*[[:space:]].*`json:"[^"]+"`)'; then
    violations+=("$file")
  fi
done <<< "$boundary_files"
if [[ "${#violations[@]}" -eq 0 ]]; then
  log "No contract-relevant signature drift detected; check passed."
  exit 0
fi
err "Contract drift detected: contract-relevant files changed without contracts/*.md updates."
err "Update the applicable contract file(s) in contracts/ in the same change."
err "Impacted files:"
for file in "${violations[@]}"; do
  err " - $file"
done
exit 1

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# check-coverage-thresholds.sh — Enforce minimum test coverage for critical packages.
#
# Runs `go test -cover` on specified packages and fails if any package
# drops below its defined minimum coverage threshold.
#
# Usage: scripts/check-coverage-thresholds.sh
set -euo pipefail
# Resolve the repo root (parent of scripts/) so relative package paths in
# THRESHOLDS work no matter where the script is invoked from.
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
# Informational line on stdout, tagged for easy grepping in CI logs.
log() {
  local text="$*"
  printf '[coverage] %s\n' "$text"
}
# Failure line on stderr.
err() {
  local text="$*"
  printf '[coverage] FAIL: %s\n' "$text" >&2
}
# Package thresholds: "package_path:minimum_percent"
# Set ~2% below current values to catch regressions without blocking on noise.
THRESHOLDS=(
  "internal/sync:70"
  "internal/storage:60"
  "internal/service:50"
  "internal/service/conversion:80"
  "internal/cli:30"
  "internal/model:40"
)
failures=0
passes=0
for entry in "${THRESHOLDS[@]}"; do
  # Split "path:percent" — %%:* takes everything before the first ':',
  # ##*: everything after the last; identical for single-colon entries.
  pkg="${entry%%:*}"
  threshold="${entry##*:}"
  # Run go test with coverage and extract percentage
  output=$(go test -cover "./$pkg" 2>&1) || {
    err "$pkg — tests failed"
    failures=$((failures + 1))
    continue
  }
  # Extract coverage percentage (e.g., "coverage: 72.1% of statements")
  coverage=$(echo "$output" | grep -oE 'coverage: [0-9]+\.[0-9]+%' | grep -oE '[0-9]+\.[0-9]+' || echo "0.0")
  # NOTE: the '|| echo "0.0"' fallback means $coverage is never empty here;
  # the -z arm below is defensive only.
  if [[ -z "$coverage" || "$coverage" == "0.0" ]]; then
    # Package might have no test files or no statements
    if echo "$output" | grep -q '\[no test files\]'; then
      err "$pkg — no test files (threshold: ${threshold}%)"
      failures=$((failures + 1))
    else
      err "$pkg — could not determine coverage (threshold: ${threshold}%)"
      failures=$((failures + 1))
    fi
    continue
  fi
  # Compare using awk for floating-point comparison
  passed=$(awk "BEGIN { print ($coverage >= $threshold) ? 1 : 0 }")
  if [[ "$passed" -eq 1 ]]; then
    log "$pkg: ${coverage}% >= ${threshold}% threshold"
    passes=$((passes + 1))
  else
    err "$pkg: ${coverage}% < ${threshold}% threshold"
    failures=$((failures + 1))
  fi
done
echo ""
log "Results: ${passes} passed, ${failures} failed (${#THRESHOLDS[@]} packages checked)"
if [[ "$failures" -gt 0 ]]; then
  err "Coverage threshold check failed."
  exit 1
fi
log "All coverage thresholds met."
exit 0

View File

@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# ci-local.sh — Run augur CI quality gates locally.
# Mirrors .github/workflows/ci-quality-gates.yml without GitHub-hosted runners.
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
# Stage toggles, flipped by parse_args; all default on when no explicit
# stage flag is passed.
run_contracts=false
run_backend=false
run_extensions=false
explicit_stage=false
skip_install=false
# Extension suites selected via --suite; falls back to default_suites.
declare -a suites=()
declare -a default_suites=(
  "tests"
  "tests-copilot"
  "tests-deepseek"
  "tests-perplexity"
  "tests-grok"
  "tests-poe"
)
# Print CLI usage to stdout. The heredoc delimiter is quoted ('EOF') so
# the text is emitted verbatim, with no expansion.
usage() {
  cat <<'EOF'
Usage: ./scripts/ci-local.sh [options]
Runs local CI gates equivalent to .github/workflows/ci-quality-gates.yml:
1) contracts -> scripts/check-contract-drift.sh
2) backend -> go mod download, go vet ./..., go test ./... -count=1
3) extensions -> npm ci + npm test in each extension test suite
If no stage options are provided, all stages run.
Options:
--contracts Run contracts drift check stage
--backend Run backend Go stage
--extensions Run extension Jest stage
--suite NAME Extension suite name (repeatable), e.g. tests-deepseek
--skip-install Skip dependency install steps (go mod download, npm ci)
-h, --help Show this help
Examples:
./scripts/ci-local.sh
./scripts/ci-local.sh --backend
./scripts/ci-local.sh --extensions --suite tests --suite tests-copilot
./scripts/ci-local.sh --contracts --backend --skip-install
EOF
}
# Informational message, prefixed with the script tag.
log() {
  local text="$*"
  printf '[ci-local] %s\n' "$text"
}
# Fatal error: message to stderr, then exit non-zero.
die() {
  local text="$*"
  printf '[ci-local] ERROR: %s\n' "$text" >&2
  exit 1
}
# Abort unless the named command is on PATH.
require_cmd() {
  local wanted="$1"
  if ! command -v "$wanted" >/dev/null 2>&1; then
    die "required command not found: $wanted"
  fi
}
# Parse command-line flags into the stage/suite globals declared above.
# Without explicit stage flags, every stage runs; without --suite flags,
# the default suite list applies.
parse_args() {
  while [[ $# -gt 0 ]]; do
    local flag="$1"
    shift
    case "$flag" in
      --contracts)
        explicit_stage=true
        run_contracts=true
        ;;
      --backend)
        explicit_stage=true
        run_backend=true
        ;;
      --extensions)
        explicit_stage=true
        run_extensions=true
        ;;
      --suite)
        [[ $# -gt 0 ]] || die "--suite requires a value"
        suites+=("$1")
        shift
        ;;
      --skip-install)
        skip_install=true
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "unknown argument: $flag"
        ;;
    esac
  done
  # No explicit stage selection → run the full pipeline.
  if [[ "$explicit_stage" == false ]]; then
    run_contracts=true
    run_backend=true
    run_extensions=true
  fi
  if [[ "${#suites[@]}" -eq 0 ]]; then
    suites=("${default_suites[@]}")
  fi
}
# Stage 1: contract drift check (mirrors the 'contracts' CI job).
run_contracts_stage() {
  require_cmd git
  log "Stage: contracts"
  AUGUR_CONTRACT_DRIFT_USE_WORKTREE=1 scripts/check-contract-drift.sh
}
# Stage 2: Go backend — optional module download, then vet and tests.
run_backend_stage() {
  require_cmd go
  log "Stage: backend"
  if [[ "$skip_install" != true ]]; then
    go mod download
  fi
  go vet ./...
  go test ./... -count=1
}
# Stage 3: browser-extension Jest suites, one directory per suite name.
run_extensions_stage() {
  require_cmd npm
  log "Stage: extensions"
  local suite suite_dir
  for suite in "${suites[@]}"; do
    suite_dir="$REPO_ROOT/browser-extensions/history-extensions/$suite"
    if [[ ! -d "$suite_dir" ]]; then
      die "extension suite directory not found: $suite_dir"
    fi
    log "Suite: $suite"
    if [[ "$skip_install" != true ]]; then
      (cd "$suite_dir" && npm ci)
    fi
    (cd "$suite_dir" && npm test -- --runInBand)
  done
}
# Entry point: parse flags, run each selected stage in order, then report
# the total wall-clock duration.
main() {
  parse_args "$@"
  local t0 t1 elapsed
  t0="$(date +%s)"
  log "Starting local CI pipeline in $REPO_ROOT"
  if [[ "$run_contracts" == true ]]; then run_contracts_stage; fi
  if [[ "$run_backend" == true ]]; then run_backend_stage; fi
  if [[ "$run_extensions" == true ]]; then run_extensions_stage; fi
  t1="$(date +%s)"
  elapsed="$((t1 - t0))"
  log "All selected stages passed (${elapsed}s)."
}
main "$@"

View File

@@ -0,0 +1,69 @@
# docker-compose.yml — GitHub Actions self-hosted runner orchestration.
#
# All configuration is injected via environment files:
# - .env → shared config (GITHUB_PAT)
# - envs/augur.env → per-repo config (identity, labels, resource limits)
#
# Quick start:
# cp .env.example .env && cp envs/augur.env.example envs/augur.env
# # Edit both files with your values
# docker compose up -d
#
# To add another repo: copy envs/augur.env.example to envs/<repo>.env,
# fill in values, and add a matching service block below.
# Shared settings for every runner service, merged in via the YAML anchor.
#
# NOTE(review): ${RUNNER_IMAGE}, ${RUNNER_CPUS} and ${RUNNER_MEMORY} below
# are resolved by Compose *interpolation*, which reads only the shell
# environment and the top-level .env file. Values set in envs/augur.env
# (loaded via env_file) are passed into the container but are NOT visible
# to interpolation — confirm these variables are exported or present in
# .env, otherwise the inline defaults always win.
x-runner-common: &runner-common
  image: ${RUNNER_IMAGE:-ghcr.io/aiinfuseds/augur-runner:latest}
  build: .
  env_file:
    - .env
    - envs/augur.env
  # Writable scratch space for jobs; exec because CI may run downloaded tools.
  tmpfs:
    - /tmp:size=2G,exec
  security_opt:
    - no-new-privileges:true
  # Give in-flight jobs up to 5 minutes to finish on 'docker stop'.
  stop_grace_period: 5m
  deploy:
    resources:
      limits:
        cpus: "${RUNNER_CPUS:-4}"
        memory: "${RUNNER_MEMORY:-4G}"
  # Ephemeral runners exit after each job; restart brings the next one up.
  restart: unless-stopped
  # Healthy while the runner agent's listener process is alive.
  healthcheck:
    test: ["CMD", "pgrep", "-f", "Runner.Listener"]
    interval: 30s
    timeout: 5s
    retries: 3
    start_period: 30s
  logging:
    driver: json-file
    options:
      max-size: "50m"
      max-file: "3"

services:
  # Three identical runners; only name and work volume differ.
  # RUNNER_NAME here overrides the value from envs/augur.env.
  runner-augur-1:
    <<: *runner-common
    environment:
      RUNNER_NAME: unraid-augur-1
    volumes:
      - augur-work-1:/home/runner/_work
  runner-augur-2:
    <<: *runner-common
    environment:
      RUNNER_NAME: unraid-augur-2
    volumes:
      - augur-work-2:/home/runner/_work
  runner-augur-3:
    <<: *runner-common
    environment:
      RUNNER_NAME: unraid-augur-3
    volumes:
      - augur-work-3:/home/runner/_work

# Named volumes keep each runner's work directory across container restarts.
volumes:
  augur-work-1:
  augur-work-2:
  augur-work-3:

View File

@@ -0,0 +1,161 @@
#!/usr/bin/env bash
# entrypoint.sh — Container startup script for the GitHub Actions runner.
#
# Lifecycle:
# 1. Validate required environment variables
# 2. Generate a short-lived registration token from GITHUB_PAT
# 3. Configure the runner in ephemeral mode (one job, then exit)
# 4. Trap SIGTERM/SIGINT for graceful deregistration
# 5. Start the runner (run.sh)
#
# Docker's restart policy (restart: unless-stopped) brings the container
# back after each job completes, repeating this cycle.
set -euo pipefail
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# Install location of the runner agent inside the image.
RUNNER_DIR="/home/runner/actions-runner"
# Labels/group may be overridden via the env files; defaults match GitHub's
# standard self-hosted labels for this platform.
RUNNER_LABELS="${RUNNER_LABELS:-self-hosted,Linux,X64}"
RUNNER_GROUP="${RUNNER_GROUP:-default}"
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
# Informational message on stdout.
log() {
  local text="$*"
  printf '[entrypoint] %s\n' "$text"
}
# Fatal error on stderr, then exit non-zero.
die() {
  local text="$*"
  printf '[entrypoint] ERROR: %s\n' "$text" >&2
  exit 1
}
# ---------------------------------------------------------------------------
# Environment validation — fail fast with clear errors
# ---------------------------------------------------------------------------
# Verify every required variable is set and non-empty; abort via die() with
# the complete list of missing names so the user can fix them in one pass.
validate_env() {
  local -a absent=()
  local var
  for var in GITHUB_PAT REPO_URL RUNNER_NAME; do
    if [[ -z "${!var:-}" ]]; then
      absent+=("$var")
    fi
  done
  if [[ ${#absent[@]} -gt 0 ]]; then
    die "Missing required environment variables: ${absent[*]}. Check your .env and envs/*.env files."
  fi
}
# ---------------------------------------------------------------------------
# Token generation — PAT → short-lived registration token
# ---------------------------------------------------------------------------
# Exchange the long-lived GITHUB_PAT for a short-lived runner registration
# token via the GitHub REST API. Sets the global REG_TOKEN on success.
generate_token() {
  # Extract OWNER/REPO from the full URL.
  # Supports: https://github.com/OWNER/REPO or https://github.com/OWNER/REPO.git
  local repo_slug
  repo_slug="$(printf '%s' "$REPO_URL" \
    | sed -E 's#^https?://github\.com/##' \
    | sed -E 's/\.git$//')"
  if [[ -z "$repo_slug" ]] || ! printf '%s' "$repo_slug" | grep -qE '^[^/]+/[^/]+$'; then
    die "Could not parse OWNER/REPO from REPO_URL: $REPO_URL"
  fi
  log "Generating registration token for ${repo_slug}..."
  local response
  # FIX: without the explicit '|| die', a curl failure would abort via
  # 'set -e' with no indication of what went wrong.
  response="$(curl -fsSL \
    -X POST \
    -H "Authorization: token ${GITHUB_PAT}" \
    -H "Accept: application/vnd.github+json" \
    "https://api.github.com/repos/${repo_slug}/actions/runners/registration-token")" \
    || die "GitHub API request for a registration token failed. Check network access and that GITHUB_PAT is valid."
  REG_TOKEN="$(printf '%s' "$response" | jq -r '.token // empty')"
  if [[ -z "$REG_TOKEN" ]]; then
    die "Failed to generate registration token. Check that GITHUB_PAT has 'repo' scope and is valid."
  fi
  log "Registration token obtained (expires in 1 hour)."
}
# ---------------------------------------------------------------------------
# Cleanup — deregister runner on container stop
# ---------------------------------------------------------------------------
# Signal handler: best-effort deregistration so this runner does not linger
# as offline in the repository's runner list.
cleanup() {
  log "Caught signal, removing runner registration..."
  # Removal requires its own token type (distinct from registration tokens).
  local repo_slug remove_token
  repo_slug="$(printf '%s' "$REPO_URL" \
    | sed -E 's#^https?://github\.com/##' \
    | sed -E 's/\.git$//')"
  remove_token="$(curl -fsSL \
    -X POST \
    -H "Authorization: token ${GITHUB_PAT}" \
    -H "Accept: application/vnd.github+json" \
    "https://api.github.com/repos/${repo_slug}/actions/runners/remove-token" \
    | jq -r '.token // empty' || true)"
  if [[ -z "$remove_token" ]]; then
    log "WARNING: Could not obtain removal token. Runner may appear stale in GitHub until it expires."
  else
    "${RUNNER_DIR}/config.sh" remove --token "$remove_token" 2>/dev/null || true
    log "Runner deregistered."
  fi
  exit 0
}
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
main() {
  validate_env
  generate_token
  # BUGFIX: the previous version ended with 'exec run.sh', which replaces
  # this shell — so the trap below could never fire and cleanup() was dead
  # code. Instead, run the runner as a child and wait on it: on SIGTERM or
  # SIGINT we forward TERM to the runner, reap it, then deregister.
  local runner_pid=""
  trap 'kill -TERM "${runner_pid:-}" 2>/dev/null || true; wait "${runner_pid:-}" 2>/dev/null || true; cleanup' SIGTERM SIGINT
  # Remove stale configuration from previous run.
  # On container restart (vs recreate), the runner's writable layer persists
  # and config.sh refuses to re-configure if .runner already exists.
  # The --replace flag only handles server-side name conflicts, not this local check.
  if [[ -f "${RUNNER_DIR}/.runner" ]]; then
    log "Removing stale runner configuration from previous run..."
    rm -f "${RUNNER_DIR}/.runner" "${RUNNER_DIR}/.credentials" "${RUNNER_DIR}/.credentials_rsaparams"
  fi
  log "Configuring runner '${RUNNER_NAME}' for ${REPO_URL}..."
  log "Labels: ${RUNNER_LABELS}"
  log "Group: ${RUNNER_GROUP}"
  "${RUNNER_DIR}/config.sh" \
    --url "${REPO_URL}" \
    --token "${REG_TOKEN}" \
    --name "${RUNNER_NAME}" \
    --labels "${RUNNER_LABELS}" \
    --runnergroup "${RUNNER_GROUP}" \
    --work "/home/runner/_work" \
    --ephemeral \
    --unattended \
    --replace
  log "Runner configured. Starting..."
  # The runner picks up one job, executes it, and exits; Docker's restart
  # policy restarts the container for the next job. 'wait' propagates the
  # runner's exit status so a failing runner exits the container non-zero.
  "${RUNNER_DIR}/run.sh" &
  runner_pid=$!
  wait "$runner_pid"
}
main "$@"

View File

@@ -0,0 +1,28 @@
# augur.env — Per-repo runner configuration for the augur repository.
#
# Copy this file to augur.env and fill in your values:
# cp envs/augur.env.example envs/augur.env
#
# To add another repo, copy this file to envs/<repo>.env, adjust the values,
# and add a matching service block in docker-compose.yml.
# Runner image source (default: GHCR).
# For self-hosted registry on the same Docker engine:
# RUNNER_IMAGE=localhost:5000/augur-runner:latest
# Docker treats localhost registries as insecure by default — no daemon.json changes needed.
# RUNNER_IMAGE=ghcr.io/aiinfuseds/augur-runner:latest
# Repository to register this runner with.
REPO_URL=https://github.com/AIinfusedS/augur
# Runner identity — must be unique per runner within the repo.
RUNNER_NAME=unraid-augur
RUNNER_LABELS=self-hosted,Linux,X64
RUNNER_GROUP=default
# Resource limits for this repo's runner container.
# Tune based on the repo's CI workload.
# augur CI needs ~4 CPUs and ~4GB RAM for Go builds + extension tests.
# 3 runners x 4 CPUs = 12 cores total.
# NOTE: docker-compose.yml references ${RUNNER_CPUS}/${RUNNER_MEMORY} via
# Compose interpolation, which reads only the shell environment and the
# top-level .env file — not files loaded through env_file. Mirror these two
# values into .env (or export them) for the limits to actually take effect.
RUNNER_CPUS=4
RUNNER_MEMORY=4G

View File

@@ -0,0 +1,744 @@
#!/usr/bin/env bash
# runner.sh — Setup, manage, and tear down a GitHub Actions self-hosted runner.
#
# Supports two platforms:
# - macOS: Installs the runner agent natively, manages it as a launchd service.
# - Linux: Delegates to Docker-based runner infrastructure in infra/runners/.
#
# Typical flow:
# 1) ./scripts/runner.sh --mode setup # install/configure runner
# 2) ./scripts/runner.sh --mode status # verify runner is online
# 3) (push/PR triggers CI on the self-hosted runner)
# 4) ./scripts/runner.sh --mode stop # stop runner
# 5) ./scripts/runner.sh --mode uninstall # deregister and clean up
set -euo pipefail
# --- Script-level state, populated by parse_args ---
MODE=""
RUNNER_DIR="${AUGUR_RUNNER_DIR:-${HOME}/.augur-runner}"
RUNNER_LABELS="self-hosted,macOS,ARM64"
RUNNER_NAME=""
REPO_SLUG=""
REG_TOKEN=""
FORCE=false
FOREGROUND=false
PUSH_REGISTRY=""
# launchd identifiers for the macOS service mode.
PLIST_LABEL="com.augur.actions-runner"
PLIST_PATH="${HOME}/Library/LaunchAgents/${PLIST_LABEL}.plist"
# Resolved during Linux operations
INFRA_DIR=""
# Print CLI usage to stdout (quoted heredoc — emitted verbatim).
usage() {
  cat <<'EOF'
Usage:
./scripts/runner.sh --mode <setup|start|stop|status|build-image|uninstall> [options]
Required:
--mode MODE One of: setup, start, stop, status, build-image, uninstall
Options (macOS):
--runner-dir DIR Installation directory (default: ~/.augur-runner)
--labels LABELS Comma-separated labels (default: self-hosted,macOS,ARM64)
--name NAME Runner name (default: augur-<hostname>)
--repo OWNER/REPO GitHub repository (default: auto-detected from git remote)
--token TOKEN Registration/removal token (prompted if not provided)
--force Force re-setup even if already configured
--foreground Start in foreground instead of launchd service
Options (Linux — Docker mode):
On Linux, this script delegates to Docker Compose in infra/runners/.
Configuration is managed via .env and envs/*.env files.
See infra/runners/README.md for details.
Options (build-image):
--push REGISTRY Tag and push to a registry (e.g. 192.168.1.82:5000)
Common:
-h, --help Show this help
Examples (macOS):
./scripts/runner.sh --mode setup
./scripts/runner.sh --mode setup --token ghp_xxxxx
./scripts/runner.sh --mode start
./scripts/runner.sh --mode start --foreground
./scripts/runner.sh --mode status
./scripts/runner.sh --mode stop
./scripts/runner.sh --mode uninstall
Examples (Linux):
./scripts/runner.sh --mode setup # prompts for .env, starts runner
./scripts/runner.sh --mode start # docker compose up -d
./scripts/runner.sh --mode stop # docker compose down
./scripts/runner.sh --mode status # docker compose ps + logs
./scripts/runner.sh --mode uninstall # docker compose down -v --rmi local
Examples (build-image — works on any OS):
./scripts/runner.sh --mode build-image # build locally
./scripts/runner.sh --mode build-image --push 192.168.1.82:5000 # build + push to registry
Environment overrides:
AUGUR_RUNNER_DIR Runner installation directory (macOS only)
EOF
}
# ---------------------------------------------------------------------------
# Helpers (consistent with actions-local.sh)
# ---------------------------------------------------------------------------
# Informational message on stdout.
log() {
  local text="$*"
  printf '[runner] %s\n' "$text"
}
# Non-fatal warning on stderr.
warn() {
  local text="$*"
  printf '[runner] WARNING: %s\n' "$text" >&2
}
# Fatal error on stderr, then exit non-zero.
die() {
  local text="$*"
  printf '[runner] ERROR: %s\n' "$text" >&2
  exit 1
}
# Abort unless the named command is on PATH.
require_cmd() {
  local wanted="$1"
  if ! command -v "$wanted" >/dev/null 2>&1; then
    die "required command not found: $wanted"
  fi
}
# ---------------------------------------------------------------------------
# Platform detection
# ---------------------------------------------------------------------------
# Print a normalized OS identifier ('darwin' or 'linux'); abort otherwise.
detect_os() {
  local kernel
  kernel="$(uname -s)"
  if [[ "$kernel" == "Darwin" ]]; then
    printf 'darwin'
  elif [[ "$kernel" == "Linux" ]]; then
    printf 'linux'
  else
    die "Unsupported OS: $kernel. This script supports macOS and Linux."
  fi
}
# Guard: abort unless running on macOS.
ensure_macos() {
  if [[ "$(detect_os)" != "darwin" ]]; then
    die "This operation requires macOS."
  fi
}
# Locate the infra/runners/ directory relative to the repo root.
# The script lives at scripts/runner.sh, so repo root is one level up.
# Sets the global INFRA_DIR or dies if the compose file is missing.
find_infra_dir() {
  local here root
  here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  root="${here}/.."
  INFRA_DIR="$(cd "${root}/infra/runners" 2>/dev/null && pwd)" || true
  if [[ -z "$INFRA_DIR" || ! -f "${INFRA_DIR}/docker-compose.yml" ]]; then
    die "Could not find infra/runners/docker-compose.yml. Ensure you are running from the augur repo."
  fi
}
# ---------------------------------------------------------------------------
# Argument parsing
# ---------------------------------------------------------------------------
# Parse CLI flags into the script-level globals. MODE is mandatory and is
# validated against the known set of modes.
parse_args() {
  while [[ $# -gt 0 ]]; do
    local flag="$1"
    shift
    case "$flag" in
      --mode)
        [[ $# -gt 0 ]] || die "--mode requires a value"
        MODE="$1"
        shift
        ;;
      --runner-dir)
        [[ $# -gt 0 ]] || die "--runner-dir requires a value"
        RUNNER_DIR="$1"
        shift
        ;;
      --labels)
        [[ $# -gt 0 ]] || die "--labels requires a value"
        RUNNER_LABELS="$1"
        shift
        ;;
      --name)
        [[ $# -gt 0 ]] || die "--name requires a value"
        RUNNER_NAME="$1"
        shift
        ;;
      --repo)
        [[ $# -gt 0 ]] || die "--repo requires a value"
        REPO_SLUG="$1"
        shift
        ;;
      --token)
        [[ $# -gt 0 ]] || die "--token requires a value"
        REG_TOKEN="$1"
        shift
        ;;
      --force)
        FORCE=true
        ;;
      --foreground)
        FOREGROUND=true
        ;;
      --push)
        [[ $# -gt 0 ]] || die "--push requires a registry address (e.g. 192.168.1.82:5000)"
        PUSH_REGISTRY="$1"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "unknown argument: $flag"
        ;;
    esac
  done
  [[ -n "$MODE" ]] || die "--mode is required (setup|start|stop|status|build-image|uninstall)"
  case "$MODE" in
    setup|start|stop|status|build-image|uninstall) ;;
    *) die "invalid --mode: $MODE (expected setup|start|stop|status|build-image|uninstall)" ;;
  esac
}
# ---------------------------------------------------------------------------
# Repo detection
# ---------------------------------------------------------------------------
# Populate REPO_SLUG (OWNER/REPO), auto-detecting from the 'origin' remote
# unless --repo was given on the command line.
detect_repo() {
  if [[ -n "$REPO_SLUG" ]]; then
    return
  fi
  local origin_url
  origin_url="$(git remote get-url origin 2>/dev/null || true)"
  if [[ -z "$origin_url" ]]; then
    die "Could not detect repository from git remote. Use --repo OWNER/REPO."
  fi
  # Accept both HTTPS and SSH remote URL forms.
  REPO_SLUG="$(printf '%s' "$origin_url" \
    | sed -E 's#^(https?://github\.com/|git@github\.com:)##' \
    | sed -E 's/\.git$//')"
  if [[ -z "$REPO_SLUG" ]] || ! printf '%s' "$REPO_SLUG" | grep -qE '^[^/]+/[^/]+$'; then
    die "Could not parse OWNER/REPO from remote URL: $origin_url. Use --repo OWNER/REPO."
  fi
  log "Auto-detected repository: $REPO_SLUG"
}
# ===========================================================================
# macOS: Native runner agent + launchd service
# ===========================================================================
# ---------------------------------------------------------------------------
# Runner download and verification (macOS)
# ---------------------------------------------------------------------------
# Print the runner-release architecture suffix for this machine.
detect_arch() {
  local machine
  machine="$(uname -m)"
  case "$machine" in
    arm64|aarch64)
      printf 'arm64'
      ;;
    x86_64)
      printf 'x64'
      ;;
    *)
      die "Unsupported architecture: $machine"
      ;;
  esac
}
# Download, verify, and unpack the latest GitHub Actions runner agent into
# RUNNER_DIR. Uses the GitHub releases API for version discovery and, when
# available, the SHA256 published in the release notes for verification.
download_runner() {
  require_cmd curl
  require_cmd shasum
  require_cmd tar
  local arch
  arch="$(detect_arch)"
  log "Fetching latest runner release metadata..."
  local release_json
  release_json="$(curl -fsSL "https://api.github.com/repos/actions/runner/releases/latest")"
  # Pull the bare version number out of the "tag_name": "vX.Y.Z" field.
  local version
  version="$(printf '%s' "$release_json" | grep '"tag_name"' | sed -E 's/.*"v([^"]+)".*/\1/')"
  if [[ -z "$version" ]]; then
    die "Could not determine latest runner version from GitHub API."
  fi
  log "Latest runner version: $version"
  local tarball="actions-runner-osx-${arch}-${version}.tar.gz"
  local download_url="https://github.com/actions/runner/releases/download/v${version}/${tarball}"
  # Extract expected SHA256 from release body.
  # The body contains HTML comments like:
  # <!-- BEGIN SHA osx-arm64 -->HASH<!-- END SHA osx-arm64 -->
  local sha_marker="osx-${arch}"
  local expected_sha=""
  # The ${sha_marker} below is expanded by the shell before python runs,
  # so the regex is specific to this platform's marker.
  expected_sha="$(printf '%s' "$release_json" \
    | python3 -c "
import json,sys,re
body = json.load(sys.stdin).get('body','')
m = re.search(r'<!-- BEGIN SHA ${sha_marker} -->([0-9a-f]{64})<!-- END SHA ${sha_marker} -->', body)
print(m.group(1) if m else '')
" 2>/dev/null || true)"
  mkdir -p "$RUNNER_DIR"
  local dest="${RUNNER_DIR}/${tarball}"
  # Reuse a previously-downloaded tarball; a corrupt leftover is caught by
  # the checksum below (and deleted so the next run re-downloads).
  if [[ -f "$dest" ]]; then
    log "Tarball already exists: $dest"
  else
    log "Downloading: $download_url"
    curl -fSL -o "$dest" "$download_url"
  fi
  if [[ -n "$expected_sha" ]]; then
    log "Verifying SHA256 checksum..."
    local actual_sha
    actual_sha="$(shasum -a 256 "$dest" | awk '{print $1}')"
    if [[ "$actual_sha" != "$expected_sha" ]]; then
      rm -f "$dest"
      die "Checksum mismatch. Expected: $expected_sha, Got: $actual_sha"
    fi
    log "Checksum verified."
  else
    warn "Could not extract expected SHA256 from release metadata; skipping verification."
  fi
  log "Extracting runner into $RUNNER_DIR..."
  tar -xzf "$dest" -C "$RUNNER_DIR"
  rm -f "$dest"
  log "Runner extracted (version $version)."
}
# ---------------------------------------------------------------------------
# Registration (macOS)
# ---------------------------------------------------------------------------
# Ensure REG_TOKEN is populated, prompting the user interactively when it
# was not supplied via --token.
prompt_token() {
  if [[ -z "$REG_TOKEN" ]]; then
    log ""
    log "A registration token is required."
    log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners/new"
    log "Or via the API:"
    log " curl -X POST -H 'Authorization: token YOUR_PAT' \\"
    log " https://api.github.com/repos/${REPO_SLUG}/actions/runners/registration-token"
    log ""
    printf '[runner] Enter registration token: '
    read -r REG_TOKEN
    [[ -n "$REG_TOKEN" ]] || die "No token provided."
  fi
}
# Register the runner agent with GitHub via config.sh.
# RUNNER_NAME defaults to augur-<hostname> when unset; --replace is added
# only when --force was requested.
register_runner() {
  if [[ -z "$RUNNER_NAME" ]]; then
    RUNNER_NAME="augur-$(hostname -s)"
  fi
  log "Registering runner '${RUNNER_NAME}' with labels '${RUNNER_LABELS}'..."
  local -a config_args=(
    --url "https://github.com/${REPO_SLUG}"
    --token "$REG_TOKEN"
    --name "$RUNNER_NAME"
    --labels "$RUNNER_LABELS"
    --work "${RUNNER_DIR}/_work"
    --unattended
  )
  if [[ "$FORCE" == "true" ]]; then
    config_args+=(--replace)
  fi
  "${RUNNER_DIR}/config.sh" "${config_args[@]}"
  log "Runner registered."
}
# ---------------------------------------------------------------------------
# launchd service management (macOS)
# ---------------------------------------------------------------------------
# Write the LaunchAgent plist that keeps the runner alive across logins.
# The heredoc delimiter is unquoted, so ${PLIST_LABEL}, ${RUNNER_DIR} and
# ${HOME} are expanded into the generated file.
create_plist() {
  mkdir -p "${RUNNER_DIR}/logs"
  mkdir -p "$(dirname "$PLIST_PATH")"
  cat > "$PLIST_PATH" <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>${PLIST_LABEL}</string>
<key>ProgramArguments</key>
<array>
<string>${RUNNER_DIR}/run.sh</string>
</array>
<key>WorkingDirectory</key>
<string>${RUNNER_DIR}</string>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>${RUNNER_DIR}/logs/stdout.log</string>
<key>StandardErrorPath</key>
<string>${RUNNER_DIR}/logs/stderr.log</string>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
<key>HOME</key>
<string>${HOME}</string>
</dict>
</dict>
</plist>
EOF
  log "Launchd plist created: $PLIST_PATH"
}
# Load the runner's LaunchAgent, unloading any existing registration first
# so reloading picks up a fresh plist.
load_service() {
  if service_is_running; then
    log "Service already loaded; unloading first..."
    launchctl unload "$PLIST_PATH" 2>/dev/null || true
  fi
  launchctl load "$PLIST_PATH"
  log "Service loaded."
}
# Unload the LaunchAgent if launchd currently knows about it.
unload_service() {
  if ! service_is_running; then
    log "Service is not loaded."
    return
  fi
  launchctl unload "$PLIST_PATH" 2>/dev/null || true
  log "Service unloaded."
}
# True when launchd lists our label.
service_is_running() {
  launchctl list 2>/dev/null | grep "$PLIST_LABEL" >/dev/null
}
# ---------------------------------------------------------------------------
# macOS mode implementations
# ---------------------------------------------------------------------------
# setup: download + register the agent, install the launchd service, then
# print follow-up instructions for pointing CI at the self-hosted runner.
do_setup_darwin() {
  detect_repo
  # Idempotence guard: a configured runner leaves a .runner file behind.
  if [[ -f "${RUNNER_DIR}/.runner" ]] && [[ "$FORCE" != "true" ]]; then
    log "Runner already configured at $RUNNER_DIR."
    log "Use --force to re-setup."
    do_status_darwin
    return
  fi
  download_runner
  prompt_token
  register_runner
  create_plist
  load_service
  log ""
  log "Setup complete. Runner is registered and running."
  log ""
  log "To activate self-hosted CI, set the repository variable CI_RUNS_ON to:"
  log ' ["self-hosted", "macOS", "ARM64"]'
  log "in Settings > Secrets and variables > Actions > Variables."
  log ""
  log "Or via CLI:"
  log " gh variable set CI_RUNS_ON --body '[\"self-hosted\", \"macOS\", \"ARM64\"]'"
  log ""
  log "Energy saver: ensure your Mac does not sleep while the runner is active."
  log " System Settings > Energy Saver > Prevent automatic sleeping"
}
# start: foreground exec when requested, otherwise ensure the launchd
# service exists and is loaded.
do_start_darwin() {
  if [[ ! -f "${RUNNER_DIR}/.runner" ]]; then
    die "Runner not configured. Run --mode setup first."
  fi
  if [[ "$FOREGROUND" == "true" ]]; then
    log "Starting runner in foreground (Ctrl-C to stop)..."
    exec "${RUNNER_DIR}/run.sh"
  fi
  if service_is_running; then
    log "Runner service is already running."
    return
  fi
  if [[ ! -f "$PLIST_PATH" ]]; then
    log "Plist not found; recreating..."
    create_plist
  fi
  load_service
  log "Runner started."
}
# stop: unload the launchd service.
do_stop_darwin() {
  unload_service
  log "Runner stopped."
}
# status: report configuration, launchd service state, listener process
# state, and recent log output.
do_status_darwin() {
  log "Runner directory: $RUNNER_DIR"
  if [[ ! -f "${RUNNER_DIR}/.runner" ]]; then
    log "Status: NOT CONFIGURED"
    log "Run --mode setup to install and register the runner."
    return
  fi
  # Parse runner config
  local runner_name=""
  if command -v python3 >/dev/null 2>&1; then
    # .runner is JSON written by config.sh; agentName is the display name.
    runner_name="$(python3 -c "import json,sys; d=json.load(open(sys.argv[1])); print(d.get('agentName',''))" "${RUNNER_DIR}/.runner" 2>/dev/null || true)"
  fi
  if [[ -z "$runner_name" ]]; then
    runner_name="(could not parse)"
  fi
  log "Runner name: $runner_name"
  if service_is_running; then
    log "Service: RUNNING"
  else
    log "Service: STOPPED"
  fi
  # Cross-check the actual agent process, independent of launchd's view.
  if pgrep -f "Runner.Listener" >/dev/null 2>&1; then
    log "Process: ACTIVE (Runner.Listener found)"
  else
    log "Process: INACTIVE"
  fi
  # Show recent logs
  local log_file="${RUNNER_DIR}/logs/stdout.log"
  if [[ -f "$log_file" ]]; then
    log ""
    log "Recent log output (last 10 lines):"
    tail -n 10 "$log_file" 2>/dev/null || true
  fi
  local diag_dir="${RUNNER_DIR}/_diag"
  if [[ -d "$diag_dir" ]]; then
    # Newest diagnostic file by mtime.
    local latest_diag
    latest_diag="$(ls -t "${diag_dir}"/Runner_*.log 2>/dev/null | head -n1 || true)"
    if [[ -n "$latest_diag" ]]; then
      log ""
      log "Latest runner diagnostic (last 5 lines):"
      tail -n 5 "$latest_diag" 2>/dev/null || true
    fi
  fi
}
# uninstall: stop the service, remove the plist, deregister from GitHub
# (prompting for a removal token if needed), and delete the install dir.
do_uninstall_darwin() {
  log "Uninstalling self-hosted runner..."
  # Stop service first
  unload_service
  # Remove plist
  if [[ -f "$PLIST_PATH" ]]; then
    rm -f "$PLIST_PATH"
    log "Removed plist: $PLIST_PATH"
  fi
  # Deregister from GitHub
  if [[ -f "${RUNNER_DIR}/config.sh" ]]; then
    if [[ -z "$REG_TOKEN" ]]; then
      detect_repo
      log ""
      log "A removal token is required to deregister the runner."
      log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners"
      log "Or via the API:"
      log " curl -X POST -H 'Authorization: token YOUR_PAT' \\"
      log " https://api.github.com/repos/${REPO_SLUG}/actions/runners/remove-token"
      log ""
      # Empty input is allowed here: the user may prefer manual cleanup.
      printf '[runner] Enter removal token (or press Enter to skip deregistration): '
      read -r REG_TOKEN
    fi
    if [[ -n "$REG_TOKEN" ]]; then
      "${RUNNER_DIR}/config.sh" remove --token "$REG_TOKEN" || warn "Deregistration failed; you may need to remove the runner manually from GitHub settings."
      log "Runner deregistered from GitHub."
    else
      warn "Skipping deregistration. Remove the runner manually from GitHub settings."
    fi
  fi
  # Clean up runner directory
  if [[ -d "$RUNNER_DIR" ]]; then
    log "Removing runner directory: $RUNNER_DIR"
    rm -rf "$RUNNER_DIR"
    log "Runner directory removed."
  fi
  log "Uninstall complete."
}
# ===========================================================================
# Linux: Docker-based runner via infra/runners/
# ===========================================================================
# Verify docker plus a usable Compose implementation are present.
# Prefers the v2 plugin; tolerates standalone docker-compose with a warning.
ensure_docker() {
  require_cmd docker
  if docker compose version >/dev/null 2>&1; then
    return
  fi
  if command -v docker-compose >/dev/null 2>&1; then
    warn "Found docker-compose (standalone). docker compose v2 plugin is recommended."
    return
  fi
  die "docker compose is required. Install Docker Compose v2: https://docs.docker.com/compose/install/"
}
# Thin wrapper: docker compose pinned to the infra/runners compose file.
# Accepts any subcommand and arguments.
compose() {
  docker compose -f "${INFRA_DIR}/docker-compose.yml" "$@"
}
# Build the runner image for linux/amd64; optionally tag and push it to a
# registry given via --push.
do_build_image() {
  find_infra_dir
  ensure_docker
  local build_ctx="${INFRA_DIR}"
  # Registry-qualified tag when pushing so 'docker push' knows the target;
  # plain local name otherwise.
  local tag="augur-runner:latest"
  if [[ -n "$PUSH_REGISTRY" ]]; then
    tag="${PUSH_REGISTRY}/augur-runner:latest"
  fi
  # Always target linux/amd64 — the Dockerfile hardcodes x86_64 binaries
  # (Go linux-amd64, runner agent linux-x64). This ensures correct arch
  # even when building on an ARM Mac.
  log "Building runner image: ${tag} (platform: linux/amd64)"
  DOCKER_BUILDKIT=1 docker build --platform linux/amd64 --pull -t "$tag" "$build_ctx"
  if [[ -z "$PUSH_REGISTRY" ]]; then
    log "Image built locally as ${tag}"
    log "Use --push <registry> to push to a remote registry."
  else
    log "Pushing to ${PUSH_REGISTRY}..."
    docker push "$tag"
    log "Image pushed to ${tag}"
  fi
}
# Linux setup: materialize .env and envs/augur.env from their templates,
# optionally capture the PAT interactively, then start the Compose stack.
do_setup_linux() {
  find_infra_dir
  ensure_docker
  log "Docker-based runner setup (infra/runners/)"
  log ""
  # Create .env from template if it doesn't exist
  if [[ ! -f "${INFRA_DIR}/.env" ]]; then
    if [[ ! -f "${INFRA_DIR}/.env.example" ]]; then
      die "Missing .env.example template in ${INFRA_DIR}"
    fi
    cp "${INFRA_DIR}/.env.example" "${INFRA_DIR}/.env"
    log "Created ${INFRA_DIR}/.env from template."
    log "Edit this file to set your GITHUB_PAT."
    log ""
    printf '[runner] Enter your GitHub PAT (or press Enter to edit .env manually later): '
    read -r pat_input
    if [[ -n "$pat_input" ]]; then
      # FIX: the token used to be interpolated into a sed replacement
      # string, where '/', '&' or '\' in the value would corrupt the file.
      # Rewrite the line with grep + printf instead, which is injection-safe
      # (the GITHUB_PAT line moves to the end of the file).
      {
        grep -v '^GITHUB_PAT=' "${INFRA_DIR}/.env" || true
        printf 'GITHUB_PAT=%s\n' "$pat_input"
      } > "${INFRA_DIR}/.env.tmp"
      mv "${INFRA_DIR}/.env.tmp" "${INFRA_DIR}/.env"
      log "GITHUB_PAT set in .env"
    fi
  else
    log ".env already exists; skipping."
  fi
  # Create per-repo env from template if it doesn't exist
  if [[ ! -f "${INFRA_DIR}/envs/augur.env" ]]; then
    if [[ ! -f "${INFRA_DIR}/envs/augur.env.example" ]]; then
      die "Missing envs/augur.env.example template in ${INFRA_DIR}"
    fi
    cp "${INFRA_DIR}/envs/augur.env.example" "${INFRA_DIR}/envs/augur.env"
    log "Created ${INFRA_DIR}/envs/augur.env from template."
    log "Edit this file to configure REPO_URL, RUNNER_NAME, and resource limits."
  else
    log "envs/augur.env already exists; skipping."
  fi
  log ""
  log "Starting runner..."
  compose up -d
  log ""
  log "Setup complete. Verify with: ./scripts/runner.sh --mode status"
  log ""
  log "To activate self-hosted CI, set the repository variable CI_RUNS_ON to:"
  log ' ["self-hosted", "Linux", "X64"]'
  log ""
  log "Via CLI:"
  log " gh variable set CI_RUNS_ON --body '[\"self-hosted\", \"Linux\", \"X64\"]'"
}
# Bring the Compose stack up in the background.
do_start_linux() {
  find_infra_dir
  ensure_docker
  log "Starting Docker runner..."
  compose up -d
  log "Runner started."
}
# Tear the containers down; volumes and images are kept.
do_stop_linux() {
  find_infra_dir
  ensure_docker
  log "Stopping Docker runner..."
  compose down
  log "Runner stopped."
}
# Show container state plus a short log tail.
do_status_linux() {
  find_infra_dir
  ensure_docker
  log "Docker runner status (infra/runners/):"
  log ""
  compose ps
  log ""
  log "Recent logs (last 20 lines):"
  compose logs --tail 20 2>/dev/null || true
}
# Remove containers, volumes and locally-built images; fall back to a plain
# volume teardown if --rmi is unsupported.
do_uninstall_linux() {
  find_infra_dir
  ensure_docker
  log "Uninstalling Docker runner..."
  compose down -v --rmi local 2>/dev/null || compose down -v
  log "Docker runner removed (containers, volumes, local images)."
  log ""
  log "Note: The runner should auto-deregister from GitHub (ephemeral mode)."
  log "If a stale runner remains, remove it manually:"
  log " gh api -X DELETE repos/OWNER/REPO/actions/runners/RUNNER_ID"
}
# ===========================================================================
# Entry point — routes to macOS or Linux implementation
# ===========================================================================
main() {
  parse_args "$@"
  local platform
  platform="$(detect_os)"
  # build-image works on any OS; everything else dispatches per platform.
  if [[ "$MODE" == "build-image" ]]; then
    do_build_image
    return
  fi
  case "${MODE}:${platform}" in
    setup:darwin) do_setup_darwin ;;
    setup:*) do_setup_linux ;;
    start:darwin) do_start_darwin ;;
    start:*) do_start_linux ;;
    stop:darwin) do_stop_darwin ;;
    stop:*) do_stop_linux ;;
    status:darwin) do_status_darwin ;;
    status:*) do_status_linux ;;
    uninstall:darwin) do_uninstall_darwin ;;
    uninstall:*) do_uninstall_linux ;;
    *) die "unexpected mode: $MODE" ;;
  esac
}
main "$@"