diff --git a/lib/phase10_common.sh b/lib/phase10_common.sh index f9da484..2fab2bf 100644 --- a/lib/phase10_common.sh +++ b/lib/phase10_common.sh @@ -9,6 +9,18 @@ PHASE10_REPO_PATHS=() PHASE10_GITHUB_URLS=() PHASE10_DUPLICATES=() +phase10_repo_index_by_name() { + local repo_name="$1" + local i + for i in "${!PHASE10_REPO_NAMES[@]}"; do + if [[ "${PHASE10_REPO_NAMES[$i]}" == "$repo_name" ]]; then + printf '%s' "$i" + return 0 + fi + done + printf '%s' "-1" +} + # Parse common git remote URL formats into: host|owner|repo # Supports: # - https://host/owner/repo(.git) @@ -89,6 +101,109 @@ phase10_canonical_gitea_url() { printf 'https://%s/%s/%s.git' "$domain" "$org" "$repo" } +# Resolve which local remote currently represents GitHub for this repo path. +# Prefers "github" remote, then "origin". +phase10_find_github_remote_url() { + local repo_path="$1" github_owner="$2" + local github_url="" + + if github_url=$(git -C "$repo_path" remote get-url github 2>/dev/null); then + if phase10_url_is_github_repo "$github_url" "$github_owner"; then + printf '%s' "$github_url" + return 0 + fi + fi + + if github_url=$(git -C "$repo_path" remote get-url origin 2>/dev/null); then + if phase10_url_is_github_repo "$github_url" "$github_owner"; then + printf '%s' "$github_url" + return 0 + fi + fi + + return 1 +} + +# Add or update a discovered repo entry. +# If repo already exists and path differs, explicit path wins. 
+phase10_upsert_repo_entry() { + local repo_name="$1" repo_path="$2" github_url="$3" + local idx existing_path + + idx="$(phase10_repo_index_by_name "$repo_name")" + if [[ "$idx" -ge 0 ]]; then + existing_path="${PHASE10_REPO_PATHS[$idx]}" + if [[ "$existing_path" != "$repo_path" ]]; then + PHASE10_REPO_PATHS[idx]="$repo_path" + PHASE10_GITHUB_URLS[idx]="$github_url" + log_info "${repo_name}: using explicit include path ${repo_path} (replacing ${existing_path})" + fi + return 0 + fi + + PHASE10_REPO_NAMES+=("$repo_name") + PHASE10_REPO_PATHS+=("$repo_path") + PHASE10_GITHUB_URLS+=("$github_url") + return 0 +} + +# Add one explicitly included repo path into discovery arrays. +# Validates path is a git toplevel and maps to github.com//repo. +phase10_include_repo_path() { + local include_path="$1" github_owner="$2" + local abs_path top github_url parsed host owner repo canonical + + if [[ ! -d "$include_path" ]]; then + log_error "Include path not found: ${include_path}" + return 1 + fi + + abs_path="$(cd "$include_path" && pwd)" + if ! git -C "$abs_path" rev-parse --is-inside-work-tree >/dev/null 2>&1; then + log_error "Include path is not a git repo: ${abs_path}" + return 1 + fi + + top="$(git -C "$abs_path" rev-parse --show-toplevel 2>/dev/null || true)" + if [[ "$top" != "$abs_path" ]]; then + log_error "Include path must be repo root (git toplevel): ${abs_path}" + return 1 + fi + + github_url="$(phase10_find_github_remote_url "$abs_path" "$github_owner" 2>/dev/null || true)" + if [[ -z "$github_url" ]]; then + # Explicit include-path may point to a local repo with no GitHub remote yet. + # In that case, derive the repo slug from folder name and assume GitHub URL. 
+ repo="$(basename "$abs_path")" + canonical="$(phase10_canonical_github_url "$github_owner" "$repo")" + log_warn "Include path has no GitHub remote; assuming ${canonical}" + else + parsed=$(phase10_parse_git_url "$github_url" 2>/dev/null) || { + log_error "Could not parse GitHub remote URL for include path: ${abs_path}" + return 1 + } + IFS='|' read -r host owner repo <<< "$parsed" + canonical="$(phase10_canonical_github_url "$owner" "$repo")" + fi + + phase10_upsert_repo_entry "$repo" "$abs_path" "$canonical" + return 0 +} + +phase10_enforce_expected_count() { + local expected_count="$1" root="$2" + local i + + if [[ "$expected_count" -gt 0 ]] && [[ "${#PHASE10_REPO_NAMES[@]}" -ne "$expected_count" ]]; then + log_error "Expected ${expected_count} local repos under ${root}; found ${#PHASE10_REPO_NAMES[@]}" + for i in "${!PHASE10_REPO_NAMES[@]}"; do + log_error " - ${PHASE10_REPO_NAMES[$i]} -> ${PHASE10_REPO_PATHS[$i]}" + done + return 1 + fi + return 0 +} + # Stable in-place sort by repo name (keeps arrays aligned). phase10_sort_repo_arrays() { local i j tmp @@ -154,18 +269,7 @@ phase10_discover_local_repos() { top=$(git -C "$dir" rev-parse --show-toplevel 2>/dev/null || true) [[ "$top" == "$dir" ]] || continue - github_url="" - if github_url=$(git -C "$dir" remote get-url github 2>/dev/null); then - if ! phase10_url_is_github_repo "$github_url" "$github_owner"; then - github_url="" - fi - fi - - if [[ -z "$github_url" ]] && github_url=$(git -C "$dir" remote get-url origin 2>/dev/null); then - if ! 
phase10_url_is_github_repo "$github_url" "$github_owner"; then - github_url="" - fi - fi + github_url="$(phase10_find_github_remote_url "$dir" "$github_owner" 2>/dev/null || true)" [[ -n "$github_url" ]] || continue diff --git a/phase10_local_repo_cutover.sh b/phase10_local_repo_cutover.sh index 1d392ca..172692e 100755 --- a/phase10_local_repo_cutover.sh +++ b/phase10_local_repo_cutover.sh @@ -29,17 +29,35 @@ phase_header 10 "Local Repo Remote Cutover" LOCAL_REPO_ROOT="${PHASE10_LOCAL_ROOT:-/Users/s/development}" EXPECTED_REPO_COUNT="${PHASE10_EXPECTED_REPO_COUNT:-3}" +INCLUDE_PATHS=() DRY_RUN=false +FORCE_WITH_LEASE=false ASKPASS_SCRIPT="" PHASE10_GITEA_REPO_EXISTS=false PHASE10_REMOTE_BRANCHES="" PHASE10_REMOTE_TAGS="" +PHASE10_LAST_CURL_ERROR="" +PHASE10_LAST_HTTP_CODE="" +PHASE10_HTTP_CONNECT_TIMEOUT="${PHASE10_HTTP_CONNECT_TIMEOUT:-15}" +PHASE10_HTTP_LOW_SPEED_LIMIT="${PHASE10_HTTP_LOW_SPEED_LIMIT:-1}" +PHASE10_HTTP_LOW_SPEED_TIME="${PHASE10_HTTP_LOW_SPEED_TIME:-30}" +PHASE10_PUSH_TIMEOUT_SEC="${PHASE10_PUSH_TIMEOUT_SEC:-120}" +PHASE10_LSREMOTE_TIMEOUT_SEC="${PHASE10_LSREMOTE_TIMEOUT_SEC:-45}" +PHASE10_API_CONNECT_TIMEOUT_SEC="${PHASE10_API_CONNECT_TIMEOUT_SEC:-8}" +PHASE10_API_MAX_TIME_SEC="${PHASE10_API_MAX_TIME_SEC:-20}" + +if [[ -n "${PHASE10_INCLUDE_PATHS:-}" ]]; then + # Space-delimited list of extra repo roots to include in phase10 discovery. 
+ read -r -a INCLUDE_PATHS <<< "${PHASE10_INCLUDE_PATHS}" +fi for arg in "$@"; do case "$arg" in --local-root=*) LOCAL_REPO_ROOT="${arg#*=}" ;; --expected-count=*) EXPECTED_REPO_COUNT="${arg#*=}" ;; + --include-path=*) INCLUDE_PATHS+=("${arg#*=}") ;; --dry-run) DRY_RUN=true ;; + --force-with-lease) FORCE_WITH_LEASE=true ;; --help|-h) cat </dev/null 2>&1; then + perl -e ' + my $timeout = shift @ARGV; + my $pid = fork(); + if (!defined $pid) { exit 125; } + if ($pid == 0) { + setpgrp(0, 0); + exec @ARGV; + exit 125; + } + my $timed_out = 0; + local $SIG{ALRM} = sub { + $timed_out = 1; + kill "TERM", -$pid; + select(undef, undef, undef, 0.5); + kill "KILL", -$pid; + }; + alarm $timeout; + waitpid($pid, 0); + alarm 0; + if ($timed_out) { exit 124; } + my $rc = $?; + if ($rc == -1) { exit 125; } + if ($rc & 127) { exit(128 + ($rc & 127)); } + exit($rc >> 8); + ' "$timeout_sec" "$@" + else + "$@" + fi +} + +git_with_auth_timed() { + local timeout_sec="$1" + shift + run_with_timeout "$timeout_sec" \ + env \ + GIT_TERMINAL_PROMPT=0 \ + GIT_ASKPASS="$ASKPASS_SCRIPT" \ + GITEA_GIT_USERNAME="$GITEA_ADMIN_USER" \ + GITEA_GIT_TOKEN="$GITEA_ADMIN_TOKEN" \ + "$@" +} + ensure_github_remote() { local repo_path="$1" repo_name="$2" github_url="$3" local existing origin_existing has_bad_github @@ -138,8 +212,14 @@ ensure_github_remote() { return 1 fi - log_error "${repo_name}: could not find GitHub remote in 'origin' or 'github'" - return 1 + # Explicit include-path repos may have no GitHub remote yet. 
+ if [[ "$DRY_RUN" == "true" ]]; then + log_info "${repo_name}: would add github remote -> ${github_url}" + else + git -C "$repo_path" remote add github "$github_url" + log_success "${repo_name}: added github remote (${github_url})" + fi + return 0 } ensure_gitea_origin() { @@ -176,33 +256,76 @@ ensure_gitea_origin() { ensure_gitea_repo_exists() { local repo_name="$1" - local create_payload http_code + local create_payload create_response get_gitea_repo_http_code() { local target_repo="$1" - local tmpfile curl_code + local tmpfile errfile curl_code tmpfile=$(mktemp) + errfile=$(mktemp) curl_code=$(curl \ - -s \ + -sS \ -o "$tmpfile" \ -w "%{http_code}" \ + --connect-timeout "$PHASE10_API_CONNECT_TIMEOUT_SEC" \ + --max-time "$PHASE10_API_MAX_TIME_SEC" \ -H "Authorization: token ${GITEA_ADMIN_TOKEN}" \ -H "Accept: application/json" \ - "${GITEA_INTERNAL_URL}/api/v1/repos/${GITEA_ORG_NAME}/${target_repo}") || { + "${GITEA_INTERNAL_URL}/api/v1/repos/${GITEA_ORG_NAME}/${target_repo}" 2>"$errfile") || { + PHASE10_LAST_CURL_ERROR="$(tr '\n' ' ' < "$errfile" | sed 's/[[:space:]]\+/ /g; s/^ //; s/ $//')" rm -f "$tmpfile" + rm -f "$errfile" return 1 } rm -f "$tmpfile" - printf '%s' "$curl_code" + rm -f "$errfile" + PHASE10_LAST_CURL_ERROR="" + PHASE10_LAST_HTTP_CODE="$curl_code" + return 0 + } + + create_gitea_repo() { + local payload="$1" + local tmpfile errfile curl_code + tmpfile=$(mktemp) + errfile=$(mktemp) + curl_code=$(curl \ + -sS \ + -o "$tmpfile" \ + -w "%{http_code}" \ + --connect-timeout "$PHASE10_API_CONNECT_TIMEOUT_SEC" \ + --max-time "$PHASE10_API_MAX_TIME_SEC" \ + -X POST \ + -H "Authorization: token ${GITEA_ADMIN_TOKEN}" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d "$payload" \ + "${GITEA_INTERNAL_URL}/api/v1/orgs/${GITEA_ORG_NAME}/repos" 2>"$errfile") || { + PHASE10_LAST_CURL_ERROR="$(tr '\n' ' ' < "$errfile" | sed 's/[[:space:]]\+/ /g; s/^ //; s/ $//')" + rm -f "$tmpfile" + rm -f "$errfile" + return 1 + } + 
create_response="$(cat "$tmpfile")" + rm -f "$tmpfile" + rm -f "$errfile" + PHASE10_LAST_CURL_ERROR="" + PHASE10_LAST_HTTP_CODE="$curl_code" + return 0 } PHASE10_GITEA_REPO_EXISTS=false - if ! http_code="$(get_gitea_repo_http_code "$repo_name")"; then + PHASE10_LAST_CURL_ERROR="" + PHASE10_LAST_HTTP_CODE="" + if ! get_gitea_repo_http_code "$repo_name"; then log_error "${repo_name}: failed to query Gitea API for repo existence" + if [[ -n "$PHASE10_LAST_CURL_ERROR" ]]; then + log_error "${repo_name}: curl error: ${PHASE10_LAST_CURL_ERROR}" + fi return 1 fi - if [[ "$http_code" == "200" ]]; then + if [[ "$PHASE10_LAST_HTTP_CODE" == "200" ]]; then PHASE10_GITEA_REPO_EXISTS=true if [[ "$DRY_RUN" == "true" ]]; then log_info "${repo_name}: Gitea repo already exists (${GITEA_ORG_NAME}/${repo_name})" @@ -210,8 +333,8 @@ ensure_gitea_repo_exists() { return 0 fi - if [[ "$http_code" != "404" ]]; then - log_error "${repo_name}: unexpected Gitea API status while checking repo (${http_code})" + if [[ "$PHASE10_LAST_HTTP_CODE" != "404" ]]; then + log_error "${repo_name}: unexpected Gitea API status while checking repo (${PHASE10_LAST_HTTP_CODE})" return 1 fi @@ -224,12 +347,31 @@ ensure_gitea_repo_exists() { return 0 fi - if gitea_api POST "/orgs/${GITEA_ORG_NAME}/repos" "$create_payload" >/dev/null 2>&1; then + log_info "${repo_name}: creating missing Gitea repo ${GITEA_ORG_NAME}/${repo_name}" + PHASE10_LAST_HTTP_CODE="" + if ! 
create_gitea_repo "$create_payload"; then + log_error "${repo_name}: failed to create Gitea repo ${GITEA_ORG_NAME}/${repo_name} (network/API call failed)" + if [[ -n "$PHASE10_LAST_CURL_ERROR" ]]; then + log_error "${repo_name}: curl error: ${PHASE10_LAST_CURL_ERROR}" + fi + return 1 + fi + + if [[ "$PHASE10_LAST_HTTP_CODE" == "201" ]]; then log_success "${repo_name}: created missing Gitea repo ${GITEA_ORG_NAME}/${repo_name}" return 0 fi - log_error "${repo_name}: failed to create Gitea repo ${GITEA_ORG_NAME}/${repo_name}" + # If another process created the repo concurrently, treat it as success. + if [[ "$PHASE10_LAST_HTTP_CODE" == "409" ]]; then + log_warn "${repo_name}: Gitea repo already exists (HTTP 409), continuing" + return 0 + fi + + log_error "${repo_name}: failed to create Gitea repo ${GITEA_ORG_NAME}/${repo_name} (HTTP ${PHASE10_LAST_HTTP_CODE})" + if [[ -n "${create_response:-}" ]]; then + log_error "${repo_name}: API response: ${create_response}" + fi return 1 } @@ -249,12 +391,21 @@ list_contains() { fetch_remote_refs() { local url="$1" - local refs ref short + local refs ref short rc PHASE10_REMOTE_BRANCHES="" PHASE10_REMOTE_TAGS="" - refs=$(git_with_auth git ls-remote --heads --tags "$url" 2>/dev/null) || return 1 + refs="$(git_with_auth_timed "$PHASE10_LSREMOTE_TIMEOUT_SEC" \ + git \ + -c "http.connectTimeout=${PHASE10_HTTP_CONNECT_TIMEOUT}" \ + -c "http.lowSpeedLimit=${PHASE10_HTTP_LOW_SPEED_LIMIT}" \ + -c "http.lowSpeedTime=${PHASE10_HTTP_LOW_SPEED_TIME}" \ + ls-remote --heads --tags "$url" 2>/dev/null)" + rc=$? + if [[ "$rc" -ne 0 ]]; then + return 1 + fi [[ -n "$refs" ]] || return 0 while IFS= read -r line; do @@ -352,6 +503,7 @@ dry_run_compare_local_and_remote() { return 0 fi + log_info "${repo_name}: reading remote refs from Gitea (timeout ${PHASE10_LSREMOTE_TIMEOUT_SEC}s)" if ! 
fetch_remote_refs "$gitea_url"; then log_warn "${repo_name}: could not read Gitea refs via ls-remote; skipping diff" return 0 @@ -367,18 +519,69 @@ dry_run_compare_local_and_remote() { push_all_refs_to_origin() { local repo_path="$1" repo_name="$2" + local push_output push_args push_rc + if [[ "$DRY_RUN" == "true" ]]; then log_info "${repo_name}: would push all branches to origin" log_info "${repo_name}: would push all tags to origin" return 0 fi - if ! git_with_auth git -C "$repo_path" push --all origin >/dev/null; then - log_error "${repo_name}: failed pushing branches to Gitea origin" + push_args=(push --no-verify --all origin) + if [[ "$FORCE_WITH_LEASE" == "true" ]]; then + push_args=(push --no-verify --force-with-lease --all origin) + fi + + log_info "${repo_name}: pushing branches to origin (timeout ${PHASE10_PUSH_TIMEOUT_SEC}s)" + push_output="$(git_with_auth_timed "$PHASE10_PUSH_TIMEOUT_SEC" \ + git \ + -c "http.connectTimeout=${PHASE10_HTTP_CONNECT_TIMEOUT}" \ + -c "http.lowSpeedLimit=${PHASE10_HTTP_LOW_SPEED_LIMIT}" \ + -c "http.lowSpeedTime=${PHASE10_HTTP_LOW_SPEED_TIME}" \ + -C "$repo_path" "${push_args[@]}" 2>&1)" + push_rc=$? + if [[ "$push_rc" -ne 0 ]]; then + if [[ "$push_rc" -eq 124 ]]; then + log_error "${repo_name}: branch push timed out after ${PHASE10_PUSH_TIMEOUT_SEC}s" + log_error "${repo_name}: check network reachability to ${GITEA_DOMAIN} and retry" + return 1 + fi + if [[ "$push_output" == *"non-fast-forward"* ]] || [[ "$push_output" == *"[rejected]"* ]]; then + log_error "${repo_name}: branch push rejected (non-fast-forward)" + log_error "${repo_name}: run with --dry-run first to review diffs, then re-run with --force-with-lease if local should win" + else + log_error "${repo_name}: failed pushing branches to Gitea origin" + fi + printf '%s\n' "$push_output" >&2 return 1 fi - if ! 
git_with_auth git -C "$repo_path" push --tags origin >/dev/null; then - log_error "${repo_name}: failed pushing tags to Gitea origin" + + push_args=(push --no-verify --tags origin) + if [[ "$FORCE_WITH_LEASE" == "true" ]]; then + push_args=(push --no-verify --force-with-lease --tags origin) + fi + + log_info "${repo_name}: pushing tags to origin (timeout ${PHASE10_PUSH_TIMEOUT_SEC}s)" + push_output="$(git_with_auth_timed "$PHASE10_PUSH_TIMEOUT_SEC" \ + git \ + -c "http.connectTimeout=${PHASE10_HTTP_CONNECT_TIMEOUT}" \ + -c "http.lowSpeedLimit=${PHASE10_HTTP_LOW_SPEED_LIMIT}" \ + -c "http.lowSpeedTime=${PHASE10_HTTP_LOW_SPEED_TIME}" \ + -C "$repo_path" "${push_args[@]}" 2>&1)" + push_rc=$? + if [[ "$push_rc" -ne 0 ]]; then + if [[ "$push_rc" -eq 124 ]]; then + log_error "${repo_name}: tag push timed out after ${PHASE10_PUSH_TIMEOUT_SEC}s" + log_error "${repo_name}: check network reachability to ${GITEA_DOMAIN} and retry" + return 1 + fi + if [[ "$push_output" == *"non-fast-forward"* ]] || [[ "$push_output" == *"[rejected]"* ]]; then + log_error "${repo_name}: tag push rejected (non-fast-forward/conflict)" + log_error "${repo_name}: re-run with --force-with-lease only if replacing remote tags is intended" + else + log_error "${repo_name}: failed pushing tags to Gitea origin" + fi + printf '%s\n' "$push_output" >&2 return 1 fi return 0 @@ -398,7 +601,12 @@ retarget_tracking_to_origin() { if [[ "$DRY_RUN" == "true" ]]; then log_info "${repo_name}: would create origin/${branch} by pushing local ${branch}" else - if ! git_with_auth git -C "$repo_path" push origin "refs/heads/${branch}:refs/heads/${branch}" >/dev/null; then + if ! 
git_with_auth_timed "$PHASE10_PUSH_TIMEOUT_SEC" \ + git \ + -c "http.connectTimeout=${PHASE10_HTTP_CONNECT_TIMEOUT}" \ + -c "http.lowSpeedLimit=${PHASE10_HTTP_LOW_SPEED_LIMIT}" \ + -c "http.lowSpeedTime=${PHASE10_HTTP_LOW_SPEED_TIME}" \ + -C "$repo_path" push --no-verify origin "refs/heads/${branch}:refs/heads/${branch}" >/dev/null; then log_error "${repo_name}: could not create origin/${branch} while setting tracking" return 1 fi @@ -430,7 +638,20 @@ retarget_tracking_to_origin() { return 0 } -if ! phase10_discover_local_repos "$LOCAL_REPO_ROOT" "$GITHUB_USERNAME" "$SCRIPT_DIR" "$EXPECTED_REPO_COUNT"; then +if ! phase10_discover_local_repos "$LOCAL_REPO_ROOT" "$GITHUB_USERNAME" "$SCRIPT_DIR" 0; then + exit 1 +fi + +for include_path in "${INCLUDE_PATHS[@]}"; do + [[ -z "$include_path" ]] && continue + if ! phase10_include_repo_path "$include_path" "$GITHUB_USERNAME"; then + exit 1 + fi +done + +phase10_sort_repo_arrays + +if ! phase10_enforce_expected_count "$EXPECTED_REPO_COUNT" "$LOCAL_REPO_ROOT"; then exit 1 fi diff --git a/phase10_post_check.sh b/phase10_post_check.sh index ab9d7ae..8eda6fc 100755 --- a/phase10_post_check.sh +++ b/phase10_post_check.sh @@ -20,11 +20,18 @@ phase_header 10 "Local Repo Remote Cutover — Post-Check" LOCAL_REPO_ROOT="${PHASE10_LOCAL_ROOT:-/Users/s/development}" EXPECTED_REPO_COUNT="${PHASE10_EXPECTED_REPO_COUNT:-3}" +INCLUDE_PATHS=() + +if [[ -n "${PHASE10_INCLUDE_PATHS:-}" ]]; then + # Space-delimited list of extra repo roots to include in phase10 discovery. + read -r -a INCLUDE_PATHS <<< "${PHASE10_INCLUDE_PATHS}" +fi for arg in "$@"; do case "$arg" in --local-root=*) LOCAL_REPO_ROOT="${arg#*=}" ;; --expected-count=*) EXPECTED_REPO_COUNT="${arg#*=}" ;; + --include-path=*) INCLUDE_PATHS+=("${arg#*=}") ;; --help|-h) cat <.env instead. + +# GitHub Personal Access Token (classic or fine-grained with "repo" scope). +# Used to generate short-lived registration tokens for each runner container. 
+# Never stored in the runner agent — only used at container startup. +GITHUB_PAT=ghp_xxxxxxxxxxxxxxxxxxxx diff --git a/runners-conversion/augur/.gitignore b/runners-conversion/augur/.gitignore new file mode 100644 index 0000000..4c86ec5 --- /dev/null +++ b/runners-conversion/augur/.gitignore @@ -0,0 +1,6 @@ +# Ignore real env files (contain secrets like GITHUB_PAT). +# Only .env.example and envs/*.env.example are tracked. +.env +!.env.example +envs/*.env +!envs/*.env.example diff --git a/runners-conversion/augur/Dockerfile b/runners-conversion/augur/Dockerfile new file mode 100644 index 0000000..7305f65 --- /dev/null +++ b/runners-conversion/augur/Dockerfile @@ -0,0 +1,93 @@ +# Dockerfile — GitHub Actions self-hosted runner image. +# +# Includes: Ubuntu 24.04 + Go 1.26 + Node 24 + GitHub Actions runner agent. +# Designed for CI workloads on Linux x64 servers (e.g., Unraid, bare metal). +# +# Build: +# docker build -t augur-runner . +# +# The image is also auto-built and pushed to GHCR by the +# build-runner-image.yml workflow on Dockerfile or entrypoint changes. + +FROM ubuntu:24.04 + +# --- Metadata labels (OCI standard) --- +LABEL org.opencontainers.image.title="augur-runner" \ + org.opencontainers.image.description="GitHub Actions self-hosted runner for augur CI" \ + org.opencontainers.image.source="https://github.com/AIinfusedS/augur" \ + org.opencontainers.image.licenses="Proprietary" + +# --- Layer 1: System packages (changes least often) --- +# Combined into a single layer to minimize image size. +# --no-install-recommends avoids pulling unnecessary packages. 
+RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + git \ + jq \ + python3 \ + tini \ + sudo \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +# --- Layer 2: Go 1.26 (pinned version + SHA256 verification) --- +ARG GO_VERSION=1.26.0 +ARG GO_SHA256=aac1b08a0fb0c4e0a7c1555beb7b59180b05dfc5a3d62e40e9de90cd42f88235 +RUN curl -fsSL "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" -o /tmp/go.tar.gz && \ + echo "${GO_SHA256} /tmp/go.tar.gz" | sha256sum -c - && \ + tar -C /usr/local -xzf /tmp/go.tar.gz && \ + rm /tmp/go.tar.gz +ENV PATH="/usr/local/go/bin:${PATH}" + +# --- Layer 3: Node 24 LTS via NodeSource --- +ARG NODE_MAJOR=24 +RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_MAJOR}.x | bash - && \ + apt-get install -y --no-install-recommends nodejs && \ + rm -rf /var/lib/apt/lists/* + +# --- Layer 4: Create non-root runner user (UID/GID 1000) --- +# Ubuntu 24.04 ships with an 'ubuntu' user at UID/GID 1000. +# Remove it first, then create our runner user at the same IDs. +RUN userdel -r ubuntu 2>/dev/null || true && \ + groupadd -f -g 1000 runner && \ + useradd -m -u 1000 -g runner -s /bin/bash runner && \ + echo "runner ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/runner + +# --- Layer 5: GitHub Actions runner agent --- +# Downloads the latest runner release for linux-x64. +# The runner agent auto-updates itself between jobs, so pinning +# the exact version here is not critical. 
+ARG RUNNER_ARCH=x64 +RUN RUNNER_VERSION=$(curl -fsSL https://api.github.com/repos/actions/runner/releases/latest \ + | jq -r '.tag_name' | sed 's/^v//') && \ + curl -fsSL "https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${RUNNER_ARCH}-${RUNNER_VERSION}.tar.gz" \ + -o /tmp/runner.tar.gz && \ + mkdir -p /home/runner/actions-runner && \ + tar -xzf /tmp/runner.tar.gz -C /home/runner/actions-runner && \ + rm /tmp/runner.tar.gz && \ + chown -R runner:runner /home/runner/actions-runner && \ + /home/runner/actions-runner/bin/installdependencies.sh + +# --- Layer 6: Work directory (pre-create for Docker volume ownership) --- +# Docker named volumes inherit ownership from the mount point in the image. +# Creating _work as runner:runner ensures the volume is writable without sudo. +RUN mkdir -p /home/runner/_work && chown runner:runner /home/runner/_work + +# --- Layer 7: Entrypoint (changes most often) --- +COPY --chown=runner:runner entrypoint.sh /home/runner/entrypoint.sh +RUN chmod +x /home/runner/entrypoint.sh + +# --- Runtime configuration --- +USER runner +WORKDIR /home/runner/actions-runner + +# Health check: verify the runner listener process is alive. +# start_period gives time for registration + first job pickup. +HEALTHCHECK --interval=30s --timeout=5s --retries=3 --start-period=30s \ + CMD pgrep -f "Runner.Listener" > /dev/null || exit 1 + +# Use tini as PID 1 for proper signal forwarding and zombie reaping. +ENTRYPOINT ["tini", "--"] +CMD ["/home/runner/entrypoint.sh"] diff --git a/runners-conversion/augur/README.md b/runners-conversion/augur/README.md new file mode 100644 index 0000000..a2b44a5 --- /dev/null +++ b/runners-conversion/augur/README.md @@ -0,0 +1,416 @@ +# Self-Hosted GitHub Actions Runner (Docker) + +Run GitHub Actions CI on your own Linux server instead of GitHub-hosted runners. +Eliminates laptop CPU burden, avoids runner-minute quotas, and gives faster feedback. 
+ +## How It Works + +Each runner container: +1. Starts up, generates a short-lived registration token from your GitHub PAT +2. Registers with GitHub in **ephemeral mode** (one job per lifecycle) +3. Picks up a CI job, executes it, and exits +4. Docker's `restart: unless-stopped` brings it back for the next job + +## Prerequisites + +- Docker Engine 24+ and Docker Compose v2 +- A GitHub Personal Access Token (classic) with **`repo`** and **`read:packages`** scopes +- Network access to `github.com`, `api.github.com`, and `ghcr.io` + +## One-Time GitHub Setup + +Before deploying, the repository needs write permissions for the image build workflow. + +### Enable GHCR image builds + +The `build-runner-image.yml` workflow pushes Docker images to GHCR using the +`GITHUB_TOKEN`. By default, this token is read-only and the workflow will fail +silently (zero steps executed, no runner assigned). + +Fix by allowing write permissions for Actions workflows: + +```bash +gh api -X PUT repos/OWNER/REPO/actions/permissions/workflow \ + -f default_workflow_permissions=write \ + -F can_approve_pull_request_reviews=false +``` + +Alternatively, keep read-only defaults and create a dedicated PAT secret with +`write:packages` scope, then reference it in the workflow instead of `GITHUB_TOKEN`. + +### Build the runner image + +Trigger the GHCR image build (first time and whenever Dockerfile/entrypoint changes): + +```bash +gh workflow run build-runner-image.yml +``` + +Wait for the workflow to complete (~5 min): + +```bash +gh run list --workflow=build-runner-image.yml --limit=1 +``` + +The image is also rebuilt automatically: +- On push to `main` when `infra/runners/Dockerfile` or `entrypoint.sh` changes +- Weekly (Monday 06:00 UTC) to pick up OS patches and runner agent updates + +## Deploy on Your Server + +### Choose an image source + +| Method | Files needed on server | Registry auth? 
| Best for | +|--------|----------------------|----------------|----------| +| **Self-hosted registry** | `docker-compose.yml`, `.env`, `envs/augur.env` | No (your network) | Production — push once, pull from any machine | +| **GHCR** | `docker-compose.yml`, `.env`, `envs/augur.env` | Yes (`docker login ghcr.io`) | GitHub-native workflow | +| **Build locally** | All 5 files (+ `Dockerfile`, `entrypoint.sh`) | No | Quick start, no registry needed | + +### Option A: Self-hosted registry (recommended) + +For the full end-to-end workflow (build image on your Mac, push to Unraid registry, +start runner), see the [CI Workflow Guide](../../docs/ci-workflows.md#lifecycle-2-offload-ci-to-a-server-unraid). + +The private Docker registry is configured at `infra/registry/`. It listens on port 5000, +accessible from the LAN. Docker treats `localhost` registries as insecure by default — +no `daemon.json` changes needed on the server. To push from another machine, add +`:5000` to `insecure-registries` in that machine's Docker daemon config. + +### Option B: GHCR + +Requires the `build-runner-image.yml` workflow to have run successfully +(see [One-Time GitHub Setup](#one-time-github-setup)). + +```bash +# 1. Copy environment templates +cp .env.example .env +cp envs/augur.env.example envs/augur.env + +# 2. Edit .env — set your GITHUB_PAT +# 3. Edit envs/augur.env — set REPO_URL, RUNNER_NAME, resource limits + +# 4. Authenticate Docker with GHCR (one-time, persists to ~/.docker/config.json) +echo "$GITHUB_PAT" | docker login ghcr.io -u YOUR_GITHUB_USERNAME --password-stdin + +# 5. Pull and start +docker compose pull +docker compose up -d + +# 6. Verify runner is registered +docker compose ps +docker compose logs -f runner-augur +``` + +### Option C: Build locally + +No registry needed — builds the image directly on the target machine. +Requires `Dockerfile` and `entrypoint.sh` alongside the compose file. + +```bash +# 1. 
Copy environment templates +cp .env.example .env +cp envs/augur.env.example envs/augur.env + +# 2. Edit .env — set your GITHUB_PAT +# 3. Edit envs/augur.env — set REPO_URL, RUNNER_NAME, resource limits + +# 4. Build and start +docker compose up -d --build + +# 5. Verify runner is registered +docker compose ps +docker compose logs -f runner-augur +``` + +### Verify the runner is online in GitHub + +```bash +gh api repos/OWNER/REPO/actions/runners \ + --jq '.runners[] | {name, status, labels: [.labels[].name]}' +``` + +## Activate Self-Hosted CI + +Set the repository variable `CI_RUNS_ON` so the CI workflow targets your runner: + +```bash +gh variable set CI_RUNS_ON --body '["self-hosted", "Linux", "X64"]' +``` + +To revert to GitHub-hosted runners: +```bash +gh variable delete CI_RUNS_ON +``` + +## Configuration + +### Shared Config (`.env`) + +| Variable | Required | Description | +|----------|----------|-------------| +| `GITHUB_PAT` | Yes | GitHub PAT with `repo` + `read:packages` scope | + +### Per-Repo Config (`envs/.env`) + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `REPO_URL` | Yes | — | Full GitHub repository URL | +| `RUNNER_NAME` | Yes | — | Unique runner name within the repo | +| `RUNNER_LABELS` | No | `self-hosted,Linux,X64` | Comma-separated runner labels | +| `RUNNER_GROUP` | No | `default` | Runner group | +| `RUNNER_IMAGE` | No | `ghcr.io/aiinfuseds/augur-runner:latest` | Docker image to use | +| `RUNNER_CPUS` | No | `6` | CPU limit for the container | +| `RUNNER_MEMORY` | No | `12G` | Memory limit for the container | + +## Adding More Repos + +1. Copy the per-repo env template: + ```bash + cp envs/augur.env.example envs/myrepo.env + ``` + +2. Edit `envs/myrepo.env` — set `REPO_URL`, `RUNNER_NAME`, and resource limits. + +3. Add a service block to `docker-compose.yml`: + ```yaml + runner-myrepo: + image: ${RUNNER_IMAGE:-ghcr.io/aiinfuseds/augur-runner:latest} + build: . 
+ env_file: + - .env + - envs/myrepo.env + init: true + read_only: true + tmpfs: + - /tmp:size=2G + security_opt: + - no-new-privileges:true + stop_grace_period: 5m + deploy: + resources: + limits: + cpus: "${RUNNER_CPUS:-6}" + memory: "${RUNNER_MEMORY:-12G}" + restart: unless-stopped + healthcheck: + test: ["CMD", "pgrep", "-f", "Runner.Listener"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + logging: + driver: json-file + options: + max-size: "50m" + max-file: "3" + volumes: + - myrepo-work:/home/runner/_work + ``` + +4. Add the volume at the bottom of `docker-compose.yml`: + ```yaml + volumes: + augur-work: + myrepo-work: + ``` + +5. Start: `docker compose up -d` + +## Scaling + +Run multiple concurrent runners for the same repo: + +```bash +# Scale to 3 runners for augur +docker compose up -d --scale runner-augur=3 +``` + +Each container gets a unique runner name (Docker appends a suffix). +Set `RUNNER_NAME` to a base name like `unraid-augur` — scaled instances become +`unraid-augur-1`, `unraid-augur-2`, etc. + +## Resource Tuning + +Each repo can have different resource limits in its env file: + +```env +# Lightweight repo (linting only) +RUNNER_CPUS=2 +RUNNER_MEMORY=4G + +# Heavy repo (Go builds + extensive tests) +RUNNER_CPUS=8 +RUNNER_MEMORY=16G +``` + +### tmpfs Sizing + +The `/tmp` tmpfs defaults to 2G. 
If your CI writes large temp files, +increase it in `docker-compose.yml`: + +```yaml +tmpfs: + - /tmp:size=4G +``` + +## Monitoring + +```bash +# Container status and health +docker compose ps + +# Live logs +docker compose logs -f runner-augur + +# Last 50 log lines +docker compose logs --tail 50 runner-augur + +# Resource usage +docker stats runner-augur +``` + +## Updating the Runner Image + +To pull the latest GHCR image: +```bash +docker compose pull +docker compose up -d +``` + +To rebuild locally: +```bash +docker compose build +docker compose up -d +``` + +### Using a Self-Hosted Registry + +See the [CI Workflow Guide](../../docs/ci-workflows.md#lifecycle-2-offload-ci-to-a-server-unraid) +for the full build-push-start workflow with a self-hosted registry. + +## Troubleshooting + +### Image build workflow fails with zero steps + +The `build-runner-image.yml` workflow needs `packages: write` permission. +If the repo's default workflow permissions are read-only, the job fails +instantly (0 steps, no runner assigned). See [One-Time GitHub Setup](#one-time-github-setup). + +### `docker compose pull` returns "access denied" or 403 + +The GHCR package inherits the repository's visibility. For private repos, +authenticate Docker first: + +```bash +echo "$GITHUB_PAT" | docker login ghcr.io -u USERNAME --password-stdin +``` + +Or make the package public: +```bash +gh api -X PATCH /user/packages/container/augur-runner -f visibility=public +``` + +Or skip GHCR entirely and build locally: `docker compose build`. + +### Runner doesn't appear in GitHub + +1. Check logs: `docker compose logs runner-augur` +2. Verify `GITHUB_PAT` has `repo` scope +3. Verify `REPO_URL` is correct (full HTTPS URL) +4. Check network: `docker compose exec runner-augur curl -s https://api.github.com` + +### Runner appears "offline" + +The runner may have exited after a job. Check: +```bash +docker compose ps # Is the container running? 
+docker compose restart runner-augur # Force restart +``` + +### OOM (Out of Memory) kills + +Increase `RUNNER_MEMORY` in the per-repo env file: +```env +RUNNER_MEMORY=16G +``` + +Then: `docker compose up -d` + +### Stale/ghost runners in GitHub + +Ephemeral runners deregister automatically after each job. If a container +was killed ungracefully (power loss, `docker kill`), the runner may appear +stale. It will auto-expire after a few hours, or remove manually: + +```bash +# List runners +gh api repos/OWNER/REPO/actions/runners --jq '.runners[] | {id, name, status}' + +# Remove stale runner by ID +gh api -X DELETE repos/OWNER/REPO/actions/runners/RUNNER_ID +``` + +### Disk space + +Check work directory volume usage: +```bash +docker system df -v +``` + +Clean up unused volumes: +```bash +docker compose down -v # Remove work volumes +docker volume prune # Remove all unused volumes +``` + +## Unraid Notes + +- **Docker login persistence**: `docker login ghcr.io` writes credentials to + `/root/.docker/config.json`. On Unraid, `/root` is on the USB flash drive + and persists across reboots. Verify with `cat /root/.docker/config.json` + after login. +- **Compose file location**: Place the 3 files (`docker-compose.yml`, `.env`, + `envs/augur.env`) in a share directory (e.g., `/mnt/user/appdata/augur-runner/`). +- **Alternative to GHCR**: If you don't want to deal with registry auth on Unraid, + copy the `Dockerfile` and `entrypoint.sh` alongside the compose file and use + `docker compose up -d --build` instead. No registry needed. 
+ +## Security + +| Measure | Description | +|---------|-------------| +| Ephemeral mode | Fresh runner state per job — no cross-job contamination | +| PAT scope isolation | PAT generates a short-lived registration token; PAT never touches the runner agent | +| Non-root user | Runner process runs as UID 1000, not root | +| no-new-privileges | Prevents privilege escalation via setuid/setgid binaries | +| tini (PID 1) | Proper signal forwarding and zombie process reaping | +| Log rotation | Prevents disk exhaustion from verbose CI output (50MB x 3 files) | + +### PAT Scope + +Use the minimum scope required: +- **Classic token**: `repo` + `read:packages` scopes +- **Fine-grained token**: Repository access → Only select repositories → Read and Write for Administration + +### Network Considerations + +The runner container needs outbound access to: +- `github.com` (clone repos, download actions) +- `api.github.com` (registration, status) +- `ghcr.io` (pull runner image — only if using GHCR) +- Package registries (`proxy.golang.org`, `registry.npmjs.org`, etc.) + +No inbound ports are required. + +## Stopping and Removing + +```bash +# Stop runners (waits for stop_grace_period) +docker compose down + +# Stop and remove work volumes +docker compose down -v + +# Stop, remove volumes, and delete the locally built image +docker compose down -v --rmi local +``` diff --git a/runners-conversion/augur/actions-local.sh b/runners-conversion/augur/actions-local.sh new file mode 100755 index 0000000..7422857 --- /dev/null +++ b/runners-conversion/augur/actions-local.sh @@ -0,0 +1,496 @@ +#!/usr/bin/env bash +# actions-local.sh — Setup/start/stop local GitHub Actions runtime on macOS. +# +# This script prepares and manages local execution of workflows with `act`. +# Default runtime is Colima (free, local Docker daemon). 
+# +# Typical flow: +# 1) ./scripts/actions-local.sh --mode setup +# 2) ./scripts/actions-local.sh --mode start +# 3) act -W .github/workflows/ci-quality-gates.yml +# 4) ./scripts/actions-local.sh --mode stop + +set -euo pipefail + +MODE="" +RUNTIME="auto" +RUNTIME_EXPLICIT=false +REFRESH_BREW=false + +COLIMA_PROFILE="${AUGUR_ACTIONS_COLIMA_PROFILE:-augur-actions}" +COLIMA_CPU="${AUGUR_ACTIONS_COLIMA_CPU:-4}" +COLIMA_MEMORY_GB="${AUGUR_ACTIONS_COLIMA_MEMORY_GB:-8}" +COLIMA_DISK_GB="${AUGUR_ACTIONS_COLIMA_DISK_GB:-60}" +WAIT_TIMEOUT_SEC="${AUGUR_ACTIONS_WAIT_TIMEOUT_SEC:-180}" + +STATE_DIR="${TMPDIR:-/tmp}" +STATE_FILE="${STATE_DIR%/}/augur-actions-local.state" + +STATE_RUNTIME="" +STATE_PROFILE="" +STATE_STARTED_BY_SCRIPT="0" + +usage() { + cat <<'EOF' +Usage: + ./scripts/actions-local.sh --mode [options] + +Required: + --mode MODE One of: setup, start, stop + +Options: + --runtime RUNTIME Runtime choice: auto, colima, docker-desktop (default: auto) + --refresh-brew In setup mode, force brew metadata refresh even if nothing is missing + --colima-profile NAME Colima profile name (default: augur-actions) + --cpu N Colima CPU count for start (default: 4) + --memory-gb N Colima memory (GB) for start (default: 8) + --disk-gb N Colima disk (GB) for start (default: 60) + -h, --help Show this help + +Examples: + ./scripts/actions-local.sh --mode setup + ./scripts/actions-local.sh --mode start + ./scripts/actions-local.sh --mode start --runtime colima --cpu 6 --memory-gb 12 + ./scripts/actions-local.sh --mode stop + ./scripts/actions-local.sh --mode stop --runtime colima + +Environment overrides: + AUGUR_ACTIONS_COLIMA_PROFILE + AUGUR_ACTIONS_COLIMA_CPU + AUGUR_ACTIONS_COLIMA_MEMORY_GB + AUGUR_ACTIONS_COLIMA_DISK_GB + AUGUR_ACTIONS_WAIT_TIMEOUT_SEC +EOF +} + +log() { + printf '[actions-local] %s\n' "$*" +} + +warn() { + printf '[actions-local] WARNING: %s\n' "$*" >&2 +} + +die() { + printf '[actions-local] ERROR: %s\n' "$*" >&2 + exit 1 +} + +require_cmd() { + local 
cmd="$1" + command -v "$cmd" >/dev/null 2>&1 || die "required command not found: $cmd" +} + +ensure_macos() { + local os + os="$(uname -s)" + [[ "$os" == "Darwin" ]] || die "This script currently supports macOS only." +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --mode) + shift + [[ $# -gt 0 ]] || die "--mode requires a value" + MODE="$1" + shift + ;; + --runtime) + shift + [[ $# -gt 0 ]] || die "--runtime requires a value" + RUNTIME="$1" + RUNTIME_EXPLICIT=true + shift + ;; + --refresh-brew) + REFRESH_BREW=true + shift + ;; + --colima-profile) + shift + [[ $# -gt 0 ]] || die "--colima-profile requires a value" + COLIMA_PROFILE="$1" + shift + ;; + --cpu) + shift + [[ $# -gt 0 ]] || die "--cpu requires a value" + COLIMA_CPU="$1" + shift + ;; + --memory-gb) + shift + [[ $# -gt 0 ]] || die "--memory-gb requires a value" + COLIMA_MEMORY_GB="$1" + shift + ;; + --disk-gb) + shift + [[ $# -gt 0 ]] || die "--disk-gb requires a value" + COLIMA_DISK_GB="$1" + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown argument: $1" + ;; + esac + done + + [[ -n "$MODE" ]] || die "--mode is required (setup|start|stop)" + case "$MODE" in + setup|start|stop) ;; + *) die "invalid --mode: $MODE (expected setup|start|stop)" ;; + esac + + case "$RUNTIME" in + auto|colima|docker-desktop) ;; + *) die "invalid --runtime: $RUNTIME (expected auto|colima|docker-desktop)" ;; + esac +} + +ensure_command_line_tools() { + if xcode-select -p >/dev/null 2>&1; then + log "Xcode Command Line Tools already installed." + return + fi + + log "Xcode Command Line Tools missing; attempting automated install..." 
+ local marker="/tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress" + local label="" + + touch "$marker" + label="$(softwareupdate -l 2>/dev/null | sed -n 's/^\* Label: //p' | grep 'Command Line Tools' | tail -n1 || true)" + rm -f "$marker" + + if [[ -n "$label" ]]; then + sudo softwareupdate -i "$label" --verbose + sudo xcode-select --switch /Library/Developer/CommandLineTools + else + warn "Could not auto-detect Command Line Tools package; launching GUI installer." + xcode-select --install || true + die "Finish installing Command Line Tools, then re-run setup." + fi + + xcode-select -p >/dev/null 2>&1 || die "Command Line Tools installation did not complete." + log "Xcode Command Line Tools installed." +} + +ensure_homebrew() { + if command -v brew >/dev/null 2>&1; then + log "Homebrew already installed." + else + require_cmd curl + log "Installing Homebrew..." + NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + fi + + if [[ -x /opt/homebrew/bin/brew ]]; then + eval "$(/opt/homebrew/bin/brew shellenv)" + elif [[ -x /usr/local/bin/brew ]]; then + eval "$(/usr/local/bin/brew shellenv)" + elif command -v brew >/dev/null 2>&1; then + eval "$("$(command -v brew)" shellenv)" + else + die "Homebrew not found after installation." + fi + + log "Homebrew ready: $(brew --version | head -n1)" +} + +install_brew_formula_if_missing() { + local formula="$1" + if brew list --versions "$formula" >/dev/null 2>&1; then + log "Already installed: $formula" + else + log "Installing: $formula" + brew install "$formula" + fi +} + +list_missing_formulas() { + local formulas=("$@") + local -a missing=() + local formula + for formula in "${formulas[@]}"; do + if ! 
brew list --versions "$formula" >/dev/null 2>&1; then
+      missing+=("$formula")
+    fi
+  done
+  if [[ "${#missing[@]}" -gt 0 ]]; then
+    printf '%s\n' "${missing[@]}"
+  fi
+}
+
+colima_context_name() {
+  local profile="$1"
+  if [[ "$profile" == "default" ]]; then
+    printf 'colima'
+  else
+    printf 'colima-%s' "$profile"
+  fi
+}
+
+colima_is_running() {
+  local out
+  out="$(colima status --profile "$COLIMA_PROFILE" 2>&1 || true)"
+  if printf '%s' "$out" | grep -qi "not running"; then
+    return 1
+  fi
+  if printf '%s' "$out" | grep -qi "running"; then
+    return 0
+  fi
+  return 1
+}
+
+docker_ready() {
+  docker info >/dev/null 2>&1
+}
+
+wait_for_docker() {
+  local waited=0
+  while ! docker_ready; do
+    if (( waited >= WAIT_TIMEOUT_SEC )); then
+      die "Docker daemon not ready after ${WAIT_TIMEOUT_SEC}s."
+    fi
+    sleep 2
+    waited=$((waited + 2))
+  done
+}
+
+write_state() {
+  local runtime="$1"
+  local started="$2"
+  cat > "$STATE_FILE" <<EOF
+STATE_RUNTIME=${runtime}
+STATE_PROFILE=${COLIMA_PROFILE}
+STATE_STARTED_BY_SCRIPT=${started}
+EOF
+}
+
+read_state() {
+  STATE_RUNTIME=""
+  STATE_PROFILE=""
+  STATE_STARTED_BY_SCRIPT="0"
+  if [[ -f "$STATE_FILE" ]]; then
+    # shellcheck disable=SC1090 -- state file is written by write_state above
+    . "$STATE_FILE"
+  fi
+}
+
+resolve_runtime_auto() {
+  if command -v colima >/dev/null 2>&1; then
+    printf 'colima'
+    return
+  fi
+
+  if [[ -d "/Applications/Docker.app" ]] || command -v docker >/dev/null 2>&1; then
+    printf 'docker-desktop'
+    return
+  fi
+
+  die "No supported runtime found. Run setup first."
+}
+
+start_colima_runtime() {
+  require_cmd colima
+  require_cmd docker
+  require_cmd act
+
+  local started="0"
+  if colima_is_running; then
+    log "Colima profile '${COLIMA_PROFILE}' is already running."
+  else
+    log "Starting Colima profile '${COLIMA_PROFILE}' (cpu=${COLIMA_CPU}, memory=${COLIMA_MEMORY_GB}GB, disk=${COLIMA_DISK_GB}GB)..."
+    colima start --profile "$COLIMA_PROFILE" --cpu "$COLIMA_CPU" --memory "$COLIMA_MEMORY_GB" --disk "$COLIMA_DISK_GB"
+    started="1"
+  fi
+
+  local context
+  context="$(colima_context_name "$COLIMA_PROFILE")"
+  if docker context ls --format '{{.Name}}' | grep -Fxq "$context"; then
+    docker context use "$context" >/dev/null 2>&1 || true
+  fi
+
+  wait_for_docker
+  write_state "colima" "$started"
+
+  log "Runtime ready (colima)."
+ log "Try: act -W .github/workflows/ci-quality-gates.yml" +} + +start_docker_desktop_runtime() { + require_cmd docker + require_cmd act + require_cmd open + + local started="0" + if docker_ready; then + log "Docker daemon already running." + else + log "Starting Docker Desktop..." + open -ga Docker + started="1" + fi + + wait_for_docker + write_state "docker-desktop" "$started" + + log "Runtime ready (docker-desktop)." + log "Try: act -W .github/workflows/ci-quality-gates.yml" +} + +stop_colima_runtime() { + require_cmd colima + + if colima_is_running; then + log "Stopping Colima profile '${COLIMA_PROFILE}'..." + colima stop --profile "$COLIMA_PROFILE" + else + log "Colima profile '${COLIMA_PROFILE}' is already stopped." + fi +} + +stop_docker_desktop_runtime() { + require_cmd osascript + + log "Stopping Docker Desktop..." + osascript -e 'quit app "Docker"' >/dev/null 2>&1 || true +} + +do_setup() { + ensure_macos + ensure_command_line_tools + ensure_homebrew + local required_formulas=(git act colima docker) + local missing_formulas=() + local missing_formula + while IFS= read -r missing_formula; do + [[ -n "$missing_formula" ]] || continue + missing_formulas+=("$missing_formula") + done < <(list_missing_formulas "${required_formulas[@]}" || true) + + if [[ "${#missing_formulas[@]}" -eq 0 ]]; then + log "All required formulas already installed: ${required_formulas[*]}" + if [[ "$REFRESH_BREW" == "true" ]]; then + log "Refreshing Homebrew metadata (--refresh-brew)..." + brew update + else + log "Skipping brew update; nothing to install." + fi + log "Setup complete (no changes required)." + log "Next: ./scripts/actions-local.sh --mode start" + return + fi + + log "Missing formulas detected: ${missing_formulas[*]}" + log "Updating Homebrew metadata..." + brew update + + local formula + for formula in "${required_formulas[@]}"; do + install_brew_formula_if_missing "$formula" + done + + log "Setup complete." 
+ log "Next: ./scripts/actions-local.sh --mode start" +} + +do_start() { + ensure_macos + + local selected_runtime="$RUNTIME" + if [[ "$selected_runtime" == "auto" ]]; then + selected_runtime="$(resolve_runtime_auto)" + fi + + case "$selected_runtime" in + colima) + start_colima_runtime + ;; + docker-desktop) + start_docker_desktop_runtime + ;; + *) + die "unsupported runtime: $selected_runtime" + ;; + esac +} + +do_stop() { + ensure_macos + read_state + + local selected_runtime="$RUNTIME" + local should_stop="1" + + if [[ "$selected_runtime" == "auto" ]]; then + if [[ -n "$STATE_RUNTIME" ]]; then + selected_runtime="$STATE_RUNTIME" + if [[ -n "$STATE_PROFILE" ]]; then + COLIMA_PROFILE="$STATE_PROFILE" + fi + if [[ "$STATE_STARTED_BY_SCRIPT" != "1" ]]; then + should_stop="0" + fi + else + if command -v colima >/dev/null 2>&1; then + selected_runtime="colima" + elif [[ -d "/Applications/Docker.app" ]] || command -v docker >/dev/null 2>&1; then + selected_runtime="docker-desktop" + else + log "No local Actions runtime is installed or tracked. Nothing to stop." + return + fi + should_stop="0" + fi + fi + + if [[ "$should_stop" != "1" && "$RUNTIME_EXPLICIT" != "true" ]]; then + log "No runtime started by this script is currently tracked. Nothing to stop." + log "Pass --runtime colima or --runtime docker-desktop to force a stop." + return + fi + + case "$selected_runtime" in + colima) + stop_colima_runtime + ;; + docker-desktop) + stop_docker_desktop_runtime + ;; + *) + die "unsupported runtime: $selected_runtime" + ;; + esac + + if [[ -f "$STATE_FILE" ]]; then + rm -f "$STATE_FILE" + fi + + log "Stop complete." 
+} + +main() { + parse_args "$@" + + case "$MODE" in + setup) do_setup ;; + start) do_start ;; + stop) do_stop ;; + *) die "unexpected mode: $MODE" ;; + esac +} + +main "$@" diff --git a/runners-conversion/augur/check-browser-parity.sh b/runners-conversion/augur/check-browser-parity.sh new file mode 100755 index 0000000..727da25 --- /dev/null +++ b/runners-conversion/augur/check-browser-parity.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# check-browser-parity.sh — Verify Chrome/Firefox extension parity for all providers. +# +# Compares all source files between Firefox (-exporter-extension) and Chrome +# (-exporter-chrome) variants. The only allowed difference is the +# browser_specific_settings.gecko block in manifest.json. +# +# Usage: scripts/check-browser-parity.sh + +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +EXT_ROOT="$REPO_ROOT/browser-extensions/history-extensions" + +PROVIDERS=(gemini copilot deepseek grok perplexity poe) + +# Files that must be byte-identical between variants. +PARITY_FILES=( + src/content/content.js + src/lib/export.js + src/lib/popup-core.js + src/lib/popup-utils.js + src/popup/popup.js + src/popup/popup.html + src/popup/popup.css + src/popup/permissions.html +) + +log() { + printf '[parity] %s\n' "$*" +} + +err() { + printf '[parity] FAIL: %s\n' "$*" >&2 +} + +failures=0 +checks=0 + +for provider in "${PROVIDERS[@]}"; do + firefox_dir="$EXT_ROOT/${provider}-exporter-extension" + chrome_dir="$EXT_ROOT/${provider}-exporter-chrome" + + if [[ ! -d "$firefox_dir" ]]; then + err "$provider — Firefox directory missing: $firefox_dir" + failures=$((failures + 1)) + continue + fi + + if [[ ! -d "$chrome_dir" ]]; then + err "$provider — Chrome directory missing: $chrome_dir" + failures=$((failures + 1)) + continue + fi + + for file in "${PARITY_FILES[@]}"; do + checks=$((checks + 1)) + ff_path="$firefox_dir/$file" + cr_path="$chrome_dir/$file" + + if [[ ! 
-f "$ff_path" ]]; then + err "$provider — Firefox missing: $file" + failures=$((failures + 1)) + continue + fi + + if [[ ! -f "$cr_path" ]]; then + err "$provider — Chrome missing: $file" + failures=$((failures + 1)) + continue + fi + + if ! diff -q "$ff_path" "$cr_path" >/dev/null 2>&1; then + err "$provider — $file differs between Firefox and Chrome" + failures=$((failures + 1)) + fi + done + + # Validate manifest.json: only browser_specific_settings.gecko should differ. + checks=$((checks + 1)) + ff_manifest="$firefox_dir/manifest.json" + cr_manifest="$chrome_dir/manifest.json" + + if [[ ! -f "$ff_manifest" || ! -f "$cr_manifest" ]]; then + err "$provider — manifest.json missing from one or both variants" + failures=$((failures + 1)) + continue + fi + + # Strip browser_specific_settings block from Firefox manifest and normalize + # trailing commas so the remaining JSON structure matches Chrome. + ff_stripped=$(sed '/"browser_specific_settings"/,/^ }/d' "$ff_manifest" | sed '/^$/d' | sed 's/,$//') + cr_stripped=$(sed '/^$/d' "$cr_manifest" | sed 's/,$//') + + if ! diff -q <(echo "$ff_stripped") <(echo "$cr_stripped") >/dev/null 2>&1; then + err "$provider — manifest.json has unexpected differences beyond browser_specific_settings" + failures=$((failures + 1)) + fi +done + +echo "" +passed=$((checks - failures)) +log "Results: ${passed} passed, ${failures} failed (${checks} checks across ${#PROVIDERS[@]} providers)" + +if [[ "$failures" -gt 0 ]]; then + err "Browser parity check failed." + exit 1 +fi + +log "All browser parity checks passed." +exit 0 diff --git a/runners-conversion/augur/check-contract-drift.sh b/runners-conversion/augur/check-contract-drift.sh new file mode 100755 index 0000000..247b726 --- /dev/null +++ b/runners-conversion/augur/check-contract-drift.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +# check-contract-drift.sh — Enforce Constitution Principle V (contracts stay in lock-step). 
+# +# Fails when boundary-signature changes are detected under internal layers without +# any update under contracts/*.md in the same diff range. + +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$REPO_ROOT" + +log() { + printf '[contract-drift] %s\n' "$*" +} + +err() { + printf '[contract-drift] ERROR: %s\n' "$*" >&2 +} + +resolve_range() { + if [[ -n "${AUGUR_CONTRACT_DRIFT_RANGE:-}" ]]; then + printf '%s' "$AUGUR_CONTRACT_DRIFT_RANGE" + return 0 + fi + + if [[ -n "${GITHUB_BASE_REF:-}" ]]; then + git fetch --no-tags --depth=1 origin "$GITHUB_BASE_REF" >/dev/null 2>&1 || true + printf 'origin/%s...HEAD' "$GITHUB_BASE_REF" + return 0 + fi + + if [[ -n "${GITHUB_EVENT_BEFORE:-}" ]] && [[ -n "${GITHUB_SHA:-}" ]] && [[ "$GITHUB_EVENT_BEFORE" != "0000000000000000000000000000000000000000" ]]; then + printf '%s...%s' "$GITHUB_EVENT_BEFORE" "$GITHUB_SHA" + return 0 + fi + + if git rev-parse --verify HEAD~1 >/dev/null 2>&1; then + printf 'HEAD~1...HEAD' + return 0 + fi + + printf '' +} + +USE_WORKTREE="${AUGUR_CONTRACT_DRIFT_USE_WORKTREE:-0}" +RANGE="" +if [[ "$USE_WORKTREE" == "1" ]]; then + log "Diff source: working tree (HEAD -> working tree)" + changed_files="$(git diff --name-only)" +else + RANGE="$(resolve_range)" + if [[ -z "$RANGE" ]]; then + log "No diff range could be resolved; skipping contract drift check." + exit 0 + fi + log "Diff range: $RANGE" + changed_files="$(git diff --name-only "$RANGE")" +fi + +if [[ -z "$changed_files" ]]; then + log "No changed files in range; skipping." + exit 0 +fi + +if printf '%s\n' "$changed_files" | grep -Eq '^contracts/.*\.md$'; then + log "Contract files changed in range; check passed." + exit 0 +fi + +# Boundary-sensitive files that define cross-layer contracts. 
+boundary_files="$(printf '%s\n' "$changed_files" | grep -E '^internal/(cli|service|provider|storage|sync|model)/.*\.go$' || true)" + +if [[ -z "$boundary_files" ]]; then + log "No boundary-sensitive Go files changed; check passed." + exit 0 +fi + +violations=() + +while IFS= read -r file; do + [[ -z "$file" ]] && continue + + # Canonical model and provider interface are always contract-relevant. + if [[ "$file" == "internal/model/conversation.go" ]] || [[ "$file" == "internal/provider/provider.go" ]]; then + violations+=("$file") + continue + fi + + # Heuristic: exported symbol signature/shape changes in boundary layers are contract-relevant. + # Matches exported funcs, exported interfaces, and exported struct fields with JSON tags. + diff_output="" + if [[ "$USE_WORKTREE" == "1" ]]; then + diff_output="$(git diff -U0 -- "$file")" + else + diff_output="$(git diff -U0 "$RANGE" -- "$file")" + fi + + if printf '%s\n' "$diff_output" | grep -Eq '^[+-](func (\([^)]*\) )?[A-Z][A-Za-z0-9_]*\(|type [A-Z][A-Za-z0-9_]* interface|[[:space:]]+[A-Z][A-Za-z0-9_]*[[:space:]].*`json:"[^"]+"`)'; then + violations+=("$file") + fi +done <<< "$boundary_files" + +if [[ "${#violations[@]}" -eq 0 ]]; then + log "No contract-relevant signature drift detected; check passed." + exit 0 +fi + +err "Contract drift detected: contract-relevant files changed without contracts/*.md updates." +err "Update the applicable contract file(s) in contracts/ in the same change." +err "Impacted files:" +for file in "${violations[@]}"; do + err " - $file" +done + +exit 1 diff --git a/runners-conversion/augur/check-coverage-thresholds.sh b/runners-conversion/augur/check-coverage-thresholds.sh new file mode 100755 index 0000000..21d1007 --- /dev/null +++ b/runners-conversion/augur/check-coverage-thresholds.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +# check-coverage-thresholds.sh — Enforce minimum test coverage for critical packages. 
+# +# Runs `go test -cover` on specified packages and fails if any package +# drops below its defined minimum coverage threshold. +# +# Usage: scripts/check-coverage-thresholds.sh + +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$REPO_ROOT" + +log() { + printf '[coverage] %s\n' "$*" +} + +err() { + printf '[coverage] FAIL: %s\n' "$*" >&2 +} + +# Package thresholds: "package_path:minimum_percent" +# Set ~2% below current values to catch regressions without blocking on noise. +THRESHOLDS=( + "internal/sync:70" + "internal/storage:60" + "internal/service:50" + "internal/service/conversion:80" + "internal/cli:30" + "internal/model:40" +) + +failures=0 +passes=0 + +for entry in "${THRESHOLDS[@]}"; do + pkg="${entry%%:*}" + threshold="${entry##*:}" + + # Run go test with coverage and extract percentage + output=$(go test -cover "./$pkg" 2>&1) || { + err "$pkg — tests failed" + failures=$((failures + 1)) + continue + } + + # Extract coverage percentage (e.g., "coverage: 72.1% of statements") + coverage=$(echo "$output" | grep -oE 'coverage: [0-9]+\.[0-9]+%' | grep -oE '[0-9]+\.[0-9]+' || echo "0.0") + + if [[ -z "$coverage" || "$coverage" == "0.0" ]]; then + # Package might have no test files or no statements + if echo "$output" | grep -q '\[no test files\]'; then + err "$pkg — no test files (threshold: ${threshold}%)" + failures=$((failures + 1)) + else + err "$pkg — could not determine coverage (threshold: ${threshold}%)" + failures=$((failures + 1)) + fi + continue + fi + + # Compare using awk for floating-point comparison + passed=$(awk "BEGIN { print ($coverage >= $threshold) ? 
1 : 0 }") + + if [[ "$passed" -eq 1 ]]; then + log "$pkg: ${coverage}% >= ${threshold}% threshold" + passes=$((passes + 1)) + else + err "$pkg: ${coverage}% < ${threshold}% threshold" + failures=$((failures + 1)) + fi +done + +echo "" +log "Results: ${passes} passed, ${failures} failed (${#THRESHOLDS[@]} packages checked)" + +if [[ "$failures" -gt 0 ]]; then + err "Coverage threshold check failed." + exit 1 +fi + +log "All coverage thresholds met." +exit 0 diff --git a/runners-conversion/augur/ci-local.sh b/runners-conversion/augur/ci-local.sh new file mode 100755 index 0000000..2b9e4a5 --- /dev/null +++ b/runners-conversion/augur/ci-local.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash +# ci-local.sh — Run augur CI quality gates locally. +# Mirrors .github/workflows/ci-quality-gates.yml without GitHub-hosted runners. + +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$REPO_ROOT" + +run_contracts=false +run_backend=false +run_extensions=false +explicit_stage=false +skip_install=false + +declare -a suites=() +declare -a default_suites=( + "tests" + "tests-copilot" + "tests-deepseek" + "tests-perplexity" + "tests-grok" + "tests-poe" +) + +usage() { + cat <<'EOF' +Usage: ./scripts/ci-local.sh [options] + +Runs local CI gates equivalent to .github/workflows/ci-quality-gates.yml: + 1) contracts -> scripts/check-contract-drift.sh + 2) backend -> go mod download, go vet ./..., go test ./... -count=1 + 3) extensions -> npm ci + npm test in each extension test suite + +If no stage options are provided, all stages run. + +Options: + --contracts Run contracts drift check stage + --backend Run backend Go stage + --extensions Run extension Jest stage + --suite NAME Extension suite name (repeatable), e.g. 
tests-deepseek + --skip-install Skip dependency install steps (go mod download, npm ci) + -h, --help Show this help + +Examples: + ./scripts/ci-local.sh + ./scripts/ci-local.sh --backend + ./scripts/ci-local.sh --extensions --suite tests --suite tests-copilot + ./scripts/ci-local.sh --contracts --backend --skip-install +EOF +} + +log() { + printf '[ci-local] %s\n' "$*" +} + +die() { + printf '[ci-local] ERROR: %s\n' "$*" >&2 + exit 1 +} + +require_cmd() { + local cmd="$1" + command -v "$cmd" >/dev/null 2>&1 || die "required command not found: $cmd" +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --contracts) + explicit_stage=true + run_contracts=true + shift + ;; + --backend) + explicit_stage=true + run_backend=true + shift + ;; + --extensions) + explicit_stage=true + run_extensions=true + shift + ;; + --suite) + shift + [[ $# -gt 0 ]] || die "--suite requires a value" + suites+=("$1") + shift + ;; + --skip-install) + skip_install=true + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown argument: $1" + ;; + esac + done + + if [[ "$explicit_stage" == false ]]; then + run_contracts=true + run_backend=true + run_extensions=true + fi + + if [[ "${#suites[@]}" -eq 0 ]]; then + suites=("${default_suites[@]}") + fi +} + +run_contracts_stage() { + require_cmd git + log "Stage: contracts" + AUGUR_CONTRACT_DRIFT_USE_WORKTREE=1 scripts/check-contract-drift.sh +} + +run_backend_stage() { + require_cmd go + log "Stage: backend" + if [[ "$skip_install" == false ]]; then + go mod download + fi + go vet ./... + go test ./... 
-count=1 +} + +run_extensions_stage() { + require_cmd npm + log "Stage: extensions" + for suite in "${suites[@]}"; do + local suite_dir="$REPO_ROOT/browser-extensions/history-extensions/$suite" + [[ -d "$suite_dir" ]] || die "extension suite directory not found: $suite_dir" + log "Suite: $suite" + if [[ "$skip_install" == false ]]; then + (cd "$suite_dir" && npm ci) + fi + (cd "$suite_dir" && npm test -- --runInBand) + done +} + +main() { + parse_args "$@" + + local started_at + started_at="$(date +%s)" + log "Starting local CI pipeline in $REPO_ROOT" + + if [[ "$run_contracts" == true ]]; then + run_contracts_stage + fi + + if [[ "$run_backend" == true ]]; then + run_backend_stage + fi + + if [[ "$run_extensions" == true ]]; then + run_extensions_stage + fi + + local ended_at duration + ended_at="$(date +%s)" + duration="$((ended_at - started_at))" + log "All selected stages passed (${duration}s)." +} + +main "$@" diff --git a/runners-conversion/augur/docker-compose.yml b/runners-conversion/augur/docker-compose.yml new file mode 100644 index 0000000..ff380c5 --- /dev/null +++ b/runners-conversion/augur/docker-compose.yml @@ -0,0 +1,69 @@ +# docker-compose.yml — GitHub Actions self-hosted runner orchestration. +# +# All configuration is injected via environment files: +# - .env → shared config (GITHUB_PAT) +# - envs/augur.env → per-repo config (identity, labels, resource limits) +# +# Quick start: +# cp .env.example .env && cp envs/augur.env.example envs/augur.env +# # Edit both files with your values +# docker compose up -d +# +# To add another repo: copy envs/augur.env.example to envs/.env, +# fill in values, and add a matching service block below. + +x-runner-common: &runner-common + image: ${RUNNER_IMAGE:-ghcr.io/aiinfuseds/augur-runner:latest} + build: . 
+ env_file: + - .env + - envs/augur.env + tmpfs: + - /tmp:size=2G,exec + security_opt: + - no-new-privileges:true + stop_grace_period: 5m + deploy: + resources: + limits: + cpus: "${RUNNER_CPUS:-4}" + memory: "${RUNNER_MEMORY:-4G}" + restart: unless-stopped + healthcheck: + test: ["CMD", "pgrep", "-f", "Runner.Listener"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + logging: + driver: json-file + options: + max-size: "50m" + max-file: "3" + +services: + runner-augur-1: + <<: *runner-common + environment: + RUNNER_NAME: unraid-augur-1 + volumes: + - augur-work-1:/home/runner/_work + + runner-augur-2: + <<: *runner-common + environment: + RUNNER_NAME: unraid-augur-2 + volumes: + - augur-work-2:/home/runner/_work + + runner-augur-3: + <<: *runner-common + environment: + RUNNER_NAME: unraid-augur-3 + volumes: + - augur-work-3:/home/runner/_work + +volumes: + augur-work-1: + augur-work-2: + augur-work-3: diff --git a/runners-conversion/augur/entrypoint.sh b/runners-conversion/augur/entrypoint.sh new file mode 100755 index 0000000..47d3b0e --- /dev/null +++ b/runners-conversion/augur/entrypoint.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash +# entrypoint.sh — Container startup script for the GitHub Actions runner. +# +# Lifecycle: +# 1. Validate required environment variables +# 2. Generate a short-lived registration token from GITHUB_PAT +# 3. Configure the runner in ephemeral mode (one job, then exit) +# 4. Trap SIGTERM/SIGINT for graceful deregistration +# 5. Start the runner (run.sh) +# +# Docker's restart policy (restart: unless-stopped) brings the container +# back after each job completes, repeating this cycle. 
+ +set -euo pipefail + +# --------------------------------------------------------------------------- +# Configuration +# --------------------------------------------------------------------------- + +RUNNER_DIR="/home/runner/actions-runner" +RUNNER_LABELS="${RUNNER_LABELS:-self-hosted,Linux,X64}" +RUNNER_GROUP="${RUNNER_GROUP:-default}" + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +log() { + printf '[entrypoint] %s\n' "$*" +} + +die() { + printf '[entrypoint] ERROR: %s\n' "$*" >&2 + exit 1 +} + +# --------------------------------------------------------------------------- +# Environment validation — fail fast with clear errors +# --------------------------------------------------------------------------- + +validate_env() { + local missing=() + + [[ -z "${GITHUB_PAT:-}" ]] && missing+=("GITHUB_PAT") + [[ -z "${REPO_URL:-}" ]] && missing+=("REPO_URL") + [[ -z "${RUNNER_NAME:-}" ]] && missing+=("RUNNER_NAME") + + if [[ ${#missing[@]} -gt 0 ]]; then + die "Missing required environment variables: ${missing[*]}. Check your .env and envs/*.env files." + fi +} + +# --------------------------------------------------------------------------- +# Token generation — PAT → short-lived registration token +# --------------------------------------------------------------------------- + +generate_token() { + # Extract OWNER/REPO from the full URL. + # Supports: https://github.com/OWNER/REPO or https://github.com/OWNER/REPO.git + local repo_slug + repo_slug="$(printf '%s' "$REPO_URL" \ + | sed -E 's#^https?://github\.com/##' \ + | sed -E 's/\.git$//')" + + if [[ -z "$repo_slug" ]] || ! printf '%s' "$repo_slug" | grep -qE '^[^/]+/[^/]+$'; then + die "Could not parse OWNER/REPO from REPO_URL: $REPO_URL" + fi + + log "Generating registration token for ${repo_slug}..." 
+ + local response + response="$(curl -fsSL \ + -X POST \ + -H "Authorization: token ${GITHUB_PAT}" \ + -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/${repo_slug}/actions/runners/registration-token")" + + REG_TOKEN="$(printf '%s' "$response" | jq -r '.token // empty')" + + if [[ -z "$REG_TOKEN" ]]; then + die "Failed to generate registration token. Check that GITHUB_PAT has 'repo' scope and is valid." + fi + + log "Registration token obtained (expires in 1 hour)." +} + +# --------------------------------------------------------------------------- +# Cleanup — deregister runner on container stop +# --------------------------------------------------------------------------- + +cleanup() { + log "Caught signal, removing runner registration..." + + # Generate a removal token (different from registration token) + local repo_slug + repo_slug="$(printf '%s' "$REPO_URL" \ + | sed -E 's#^https?://github\.com/##' \ + | sed -E 's/\.git$//')" + + local remove_token + remove_token="$(curl -fsSL \ + -X POST \ + -H "Authorization: token ${GITHUB_PAT}" \ + -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/${repo_slug}/actions/runners/remove-token" \ + | jq -r '.token // empty' || true)" + + if [[ -n "$remove_token" ]]; then + "${RUNNER_DIR}/config.sh" remove --token "$remove_token" 2>/dev/null || true + log "Runner deregistered." + else + log "WARNING: Could not obtain removal token. Runner may appear stale in GitHub until it expires." + fi + + exit 0 +} + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +main() { + validate_env + generate_token + + # Trap signals for graceful shutdown + trap cleanup SIGTERM SIGINT + + # Remove stale configuration from previous run. + # On container restart (vs recreate), the runner's writable layer persists + # and config.sh refuses to re-configure if .runner already exists. 
+ # The --replace flag only handles server-side name conflicts, not this local check. + if [[ -f "${RUNNER_DIR}/.runner" ]]; then + log "Removing stale runner configuration from previous run..." + rm -f "${RUNNER_DIR}/.runner" "${RUNNER_DIR}/.credentials" "${RUNNER_DIR}/.credentials_rsaparams" + fi + + log "Configuring runner '${RUNNER_NAME}' for ${REPO_URL}..." + log "Labels: ${RUNNER_LABELS}" + log "Group: ${RUNNER_GROUP}" + + "${RUNNER_DIR}/config.sh" \ + --url "${REPO_URL}" \ + --token "${REG_TOKEN}" \ + --name "${RUNNER_NAME}" \ + --labels "${RUNNER_LABELS}" \ + --runnergroup "${RUNNER_GROUP}" \ + --work "/home/runner/_work" \ + --ephemeral \ + --unattended \ + --replace + + log "Runner configured. Starting..." + + # exec replaces the shell with the runner process. + # The runner picks up one job, executes it, and exits. + # Docker's restart policy restarts the container for the next job. + exec "${RUNNER_DIR}/run.sh" +} + +main "$@" diff --git a/runners-conversion/augur/envs/augur.env.example b/runners-conversion/augur/envs/augur.env.example new file mode 100644 index 0000000..52c6c02 --- /dev/null +++ b/runners-conversion/augur/envs/augur.env.example @@ -0,0 +1,28 @@ +# augur.env — Per-repo runner configuration for the augur repository. +# +# Copy this file to augur.env and fill in your values: +# cp envs/augur.env.example envs/augur.env +# +# To add another repo, copy this file to envs/.env, adjust the values, +# and add a matching service block in docker-compose.yml. + +# Runner image source (default: GHCR). +# For self-hosted registry on the same Docker engine: +# RUNNER_IMAGE=localhost:5000/augur-runner:latest +# Docker treats localhost registries as insecure by default — no daemon.json changes needed. +# RUNNER_IMAGE=ghcr.io/aiinfuseds/augur-runner:latest + +# Repository to register this runner with. +REPO_URL=https://github.com/AIinfusedS/augur + +# Runner identity — must be unique per runner within the repo. 
+RUNNER_NAME=unraid-augur +RUNNER_LABELS=self-hosted,Linux,X64 +RUNNER_GROUP=default + +# Resource limits for this repo's runner container. +# Tune based on the repo's CI workload. +# augur CI needs ~4 CPUs and ~4GB RAM for Go builds + extension tests. +# 3 runners x 4 CPUs = 12 cores total. +RUNNER_CPUS=4 +RUNNER_MEMORY=4G diff --git a/runners-conversion/augur/runner.sh b/runners-conversion/augur/runner.sh new file mode 100755 index 0000000..ef1c9f3 --- /dev/null +++ b/runners-conversion/augur/runner.sh @@ -0,0 +1,744 @@ +#!/usr/bin/env bash +# runner.sh — Setup, manage, and tear down a GitHub Actions self-hosted runner. +# +# Supports two platforms: +# - macOS: Installs the runner agent natively, manages it as a launchd service. +# - Linux: Delegates to Docker-based runner infrastructure in infra/runners/. +# +# Typical flow: +# 1) ./scripts/runner.sh --mode setup # install/configure runner +# 2) ./scripts/runner.sh --mode status # verify runner is online +# 3) (push/PR triggers CI on the self-hosted runner) +# 4) ./scripts/runner.sh --mode stop # stop runner +# 5) ./scripts/runner.sh --mode uninstall # deregister and clean up + +set -euo pipefail + +MODE="" +RUNNER_DIR="${AUGUR_RUNNER_DIR:-${HOME}/.augur-runner}" +RUNNER_LABELS="self-hosted,macOS,ARM64" +RUNNER_NAME="" +REPO_SLUG="" +REG_TOKEN="" +FORCE=false +FOREGROUND=false +PUSH_REGISTRY="" + +PLIST_LABEL="com.augur.actions-runner" +PLIST_PATH="${HOME}/Library/LaunchAgents/${PLIST_LABEL}.plist" + +# Resolved during Linux operations +INFRA_DIR="" + +usage() { + cat <<'EOF' +Usage: + ./scripts/runner.sh --mode [options] + +Required: + --mode MODE One of: setup, start, stop, status, build-image, uninstall + +Options (macOS): + --runner-dir DIR Installation directory (default: ~/.augur-runner) + --labels LABELS Comma-separated labels (default: self-hosted,macOS,ARM64) + --name NAME Runner name (default: augur-) + --repo OWNER/REPO GitHub repository (default: auto-detected from git remote) + --token TOKEN 
Registration/removal token (prompted if not provided) + --force Force re-setup even if already configured + --foreground Start in foreground instead of launchd service + +Options (Linux — Docker mode): + On Linux, this script delegates to Docker Compose in infra/runners/. + Configuration is managed via .env and envs/*.env files. + See infra/runners/README.md for details. + +Options (build-image): + --push REGISTRY Tag and push to a registry (e.g. 192.168.1.82:5000) + +Common: + -h, --help Show this help + +Examples (macOS): + ./scripts/runner.sh --mode setup + ./scripts/runner.sh --mode setup --token ghp_xxxxx + ./scripts/runner.sh --mode start + ./scripts/runner.sh --mode start --foreground + ./scripts/runner.sh --mode status + ./scripts/runner.sh --mode stop + ./scripts/runner.sh --mode uninstall + +Examples (Linux): + ./scripts/runner.sh --mode setup # prompts for .env, starts runner + ./scripts/runner.sh --mode start # docker compose up -d + ./scripts/runner.sh --mode stop # docker compose down + ./scripts/runner.sh --mode status # docker compose ps + logs + ./scripts/runner.sh --mode uninstall # docker compose down -v --rmi local + +Examples (build-image — works on any OS): + ./scripts/runner.sh --mode build-image # build locally + ./scripts/runner.sh --mode build-image --push 192.168.1.82:5000 # build + push to registry + +Environment overrides: + AUGUR_RUNNER_DIR Runner installation directory (macOS only) +EOF +} + +# --------------------------------------------------------------------------- +# Helpers (consistent with actions-local.sh) +# --------------------------------------------------------------------------- + +log() { + printf '[runner] %s\n' "$*" +} + +warn() { + printf '[runner] WARNING: %s\n' "$*" >&2 +} + +die() { + printf '[runner] ERROR: %s\n' "$*" >&2 + exit 1 +} + +require_cmd() { + local cmd="$1" + command -v "$cmd" >/dev/null 2>&1 || die "required command not found: $cmd" +} + +# 
--------------------------------------------------------------------------- +# Platform detection +# --------------------------------------------------------------------------- + +detect_os() { + case "$(uname -s)" in + Darwin) printf 'darwin' ;; + Linux) printf 'linux' ;; + *) die "Unsupported OS: $(uname -s). This script supports macOS and Linux." ;; + esac +} + +ensure_macos() { + [[ "$(detect_os)" == "darwin" ]] || die "This operation requires macOS." +} + +# Locate the infra/runners/ directory relative to the repo root. +# The script lives at scripts/runner.sh, so repo root is one level up. +find_infra_dir() { + local script_dir + script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + local repo_root="${script_dir}/.." + INFRA_DIR="$(cd "${repo_root}/infra/runners" 2>/dev/null && pwd)" || true + + if [[ -z "$INFRA_DIR" ]] || [[ ! -f "${INFRA_DIR}/docker-compose.yml" ]]; then + die "Could not find infra/runners/docker-compose.yml. Ensure you are running from the augur repo." + fi +} + +# --------------------------------------------------------------------------- +# Argument parsing +# --------------------------------------------------------------------------- + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --mode) + shift; [[ $# -gt 0 ]] || die "--mode requires a value" + MODE="$1"; shift ;; + --runner-dir) + shift; [[ $# -gt 0 ]] || die "--runner-dir requires a value" + RUNNER_DIR="$1"; shift ;; + --labels) + shift; [[ $# -gt 0 ]] || die "--labels requires a value" + RUNNER_LABELS="$1"; shift ;; + --name) + shift; [[ $# -gt 0 ]] || die "--name requires a value" + RUNNER_NAME="$1"; shift ;; + --repo) + shift; [[ $# -gt 0 ]] || die "--repo requires a value" + REPO_SLUG="$1"; shift ;; + --token) + shift; [[ $# -gt 0 ]] || die "--token requires a value" + REG_TOKEN="$1"; shift ;; + --force) + FORCE=true; shift ;; + --foreground) + FOREGROUND=true; shift ;; + --push) + shift; [[ $# -gt 0 ]] || die "--push requires a registry address (e.g. 
192.168.1.82:5000)" + PUSH_REGISTRY="$1"; shift ;; + -h|--help) + usage; exit 0 ;; + *) + die "unknown argument: $1" ;; + esac + done + + [[ -n "$MODE" ]] || die "--mode is required (setup|start|stop|status|build-image|uninstall)" + case "$MODE" in + setup|start|stop|status|build-image|uninstall) ;; + *) die "invalid --mode: $MODE (expected setup|start|stop|status|build-image|uninstall)" ;; + esac +} + +# --------------------------------------------------------------------------- +# Repo detection +# --------------------------------------------------------------------------- + +detect_repo() { + if [[ -n "$REPO_SLUG" ]]; then + return + fi + + local remote_url="" + remote_url="$(git remote get-url origin 2>/dev/null || true)" + if [[ -z "$remote_url" ]]; then + die "Could not detect repository from git remote. Use --repo OWNER/REPO." + fi + + # Extract OWNER/REPO from HTTPS or SSH URLs + REPO_SLUG="$(printf '%s' "$remote_url" \ + | sed -E 's#^(https?://github\.com/|git@github\.com:)##' \ + | sed -E 's/\.git$//')" + + if [[ -z "$REPO_SLUG" ]] || ! printf '%s' "$REPO_SLUG" | grep -qE '^[^/]+/[^/]+$'; then + die "Could not parse OWNER/REPO from remote URL: $remote_url. Use --repo OWNER/REPO." 
+ fi + + log "Auto-detected repository: $REPO_SLUG" +} + +# =========================================================================== +# macOS: Native runner agent + launchd service +# =========================================================================== + +# --------------------------------------------------------------------------- +# Runner download and verification (macOS) +# --------------------------------------------------------------------------- + +detect_arch() { + local arch + arch="$(uname -m)" + case "$arch" in + arm64|aarch64) printf 'arm64' ;; + x86_64) printf 'x64' ;; + *) die "Unsupported architecture: $arch" ;; + esac +} + +download_runner() { + require_cmd curl + require_cmd shasum + require_cmd tar + + local arch + arch="$(detect_arch)" + + log "Fetching latest runner release metadata..." + local release_json + release_json="$(curl -fsSL "https://api.github.com/repos/actions/runner/releases/latest")" + + local version + version="$(printf '%s' "$release_json" | grep '"tag_name"' | sed -E 's/.*"v([^"]+)".*/\1/')" + if [[ -z "$version" ]]; then + die "Could not determine latest runner version from GitHub API." + fi + log "Latest runner version: $version" + + local tarball="actions-runner-osx-${arch}-${version}.tar.gz" + local download_url="https://github.com/actions/runner/releases/download/v${version}/${tarball}" + + # Extract expected SHA256 from release body. 
+ # The body contains HTML comments like: + # HASH + local sha_marker="osx-${arch}" + local expected_sha="" + expected_sha="$(printf '%s' "$release_json" \ + | python3 -c " +import json,sys,re +body = json.load(sys.stdin).get('body','') +m = re.search(r'([0-9a-f]{64})', body) +print(m.group(1) if m else '') +" 2>/dev/null || true)" + + mkdir -p "$RUNNER_DIR" + local dest="${RUNNER_DIR}/${tarball}" + + if [[ -f "$dest" ]]; then + log "Tarball already exists: $dest" + else + log "Downloading: $download_url" + curl -fSL -o "$dest" "$download_url" + fi + + if [[ -n "$expected_sha" ]]; then + log "Verifying SHA256 checksum..." + local actual_sha + actual_sha="$(shasum -a 256 "$dest" | awk '{print $1}')" + if [[ "$actual_sha" != "$expected_sha" ]]; then + rm -f "$dest" + die "Checksum mismatch. Expected: $expected_sha, Got: $actual_sha" + fi + log "Checksum verified." + else + warn "Could not extract expected SHA256 from release metadata; skipping verification." + fi + + log "Extracting runner into $RUNNER_DIR..." + tar -xzf "$dest" -C "$RUNNER_DIR" + rm -f "$dest" + + log "Runner extracted (version $version)." +} + +# --------------------------------------------------------------------------- +# Registration (macOS) +# --------------------------------------------------------------------------- + +prompt_token() { + if [[ -n "$REG_TOKEN" ]]; then + return + fi + + log "" + log "A registration token is required." + log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners/new" + log "Or via the API:" + log " curl -X POST -H 'Authorization: token YOUR_PAT' \\" + log " https://api.github.com/repos/${REPO_SLUG}/actions/runners/registration-token" + log "" + printf '[runner] Enter registration token: ' + read -r REG_TOKEN + [[ -n "$REG_TOKEN" ]] || die "No token provided." +} + +register_runner() { + if [[ -z "$RUNNER_NAME" ]]; then + RUNNER_NAME="augur-$(hostname -s)" + fi + + log "Registering runner '${RUNNER_NAME}' with labels '${RUNNER_LABELS}'..." 
+ + local config_args=( + --url "https://github.com/${REPO_SLUG}" + --token "$REG_TOKEN" + --name "$RUNNER_NAME" + --labels "$RUNNER_LABELS" + --work "${RUNNER_DIR}/_work" + --unattended + ) + + if [[ "$FORCE" == "true" ]]; then + config_args+=(--replace) + fi + + "${RUNNER_DIR}/config.sh" "${config_args[@]}" + log "Runner registered." +} + +# --------------------------------------------------------------------------- +# launchd service management (macOS) +# --------------------------------------------------------------------------- + +create_plist() { + mkdir -p "${RUNNER_DIR}/logs" + mkdir -p "$(dirname "$PLIST_PATH")" + + cat > "$PLIST_PATH" < + + + + Label + ${PLIST_LABEL} + ProgramArguments + + ${RUNNER_DIR}/run.sh + + WorkingDirectory + ${RUNNER_DIR} + RunAtLoad + + KeepAlive + + StandardOutPath + ${RUNNER_DIR}/logs/stdout.log + StandardErrorPath + ${RUNNER_DIR}/logs/stderr.log + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + ${HOME} + + + +EOF + + log "Launchd plist created: $PLIST_PATH" +} + +load_service() { + if launchctl list 2>/dev/null | grep -q "$PLIST_LABEL"; then + log "Service already loaded; unloading first..." + launchctl unload "$PLIST_PATH" 2>/dev/null || true + fi + + launchctl load "$PLIST_PATH" + log "Service loaded." +} + +unload_service() { + if launchctl list 2>/dev/null | grep -q "$PLIST_LABEL"; then + launchctl unload "$PLIST_PATH" 2>/dev/null || true + log "Service unloaded." + else + log "Service is not loaded." + fi +} + +service_is_running() { + launchctl list 2>/dev/null | grep -q "$PLIST_LABEL" +} + +# --------------------------------------------------------------------------- +# macOS mode implementations +# --------------------------------------------------------------------------- + +do_setup_darwin() { + detect_repo + + if [[ -f "${RUNNER_DIR}/.runner" ]] && [[ "$FORCE" != "true" ]]; then + log "Runner already configured at $RUNNER_DIR." + log "Use --force to re-setup." 
+ do_status_darwin + return + fi + + download_runner + prompt_token + register_runner + create_plist + load_service + + log "" + log "Setup complete. Runner is registered and running." + log "" + log "To activate self-hosted CI, set the repository variable CI_RUNS_ON to:" + log ' ["self-hosted", "macOS", "ARM64"]' + log "in Settings > Secrets and variables > Actions > Variables." + log "" + log "Or via CLI:" + log " gh variable set CI_RUNS_ON --body '[\"self-hosted\", \"macOS\", \"ARM64\"]'" + log "" + log "Energy saver: ensure your Mac does not sleep while the runner is active." + log " System Settings > Energy Saver > Prevent automatic sleeping" +} + +do_start_darwin() { + [[ -f "${RUNNER_DIR}/.runner" ]] || die "Runner not configured. Run --mode setup first." + + if [[ "$FOREGROUND" == "true" ]]; then + log "Starting runner in foreground (Ctrl-C to stop)..." + exec "${RUNNER_DIR}/run.sh" + fi + + if service_is_running; then + log "Runner service is already running." + return + fi + + if [[ ! -f "$PLIST_PATH" ]]; then + log "Plist not found; recreating..." + create_plist + fi + + load_service + log "Runner started." +} + +do_stop_darwin() { + unload_service + log "Runner stopped." +} + +do_status_darwin() { + log "Runner directory: $RUNNER_DIR" + + if [[ ! -f "${RUNNER_DIR}/.runner" ]]; then + log "Status: NOT CONFIGURED" + log "Run --mode setup to install and register the runner." 
+ return + fi + + # Parse runner config + local runner_name="" + if command -v python3 >/dev/null 2>&1; then + runner_name="$(python3 -c "import json,sys; d=json.load(open(sys.argv[1])); print(d.get('agentName',''))" "${RUNNER_DIR}/.runner" 2>/dev/null || true)" + fi + if [[ -z "$runner_name" ]]; then + runner_name="(could not parse)" + fi + + log "Runner name: $runner_name" + + if service_is_running; then + log "Service: RUNNING" + else + log "Service: STOPPED" + fi + + if pgrep -f "Runner.Listener" >/dev/null 2>&1; then + log "Process: ACTIVE (Runner.Listener found)" + else + log "Process: INACTIVE" + fi + + # Show recent logs + local log_file="${RUNNER_DIR}/logs/stdout.log" + if [[ -f "$log_file" ]]; then + log "" + log "Recent log output (last 10 lines):" + tail -n 10 "$log_file" 2>/dev/null || true + fi + + local diag_dir="${RUNNER_DIR}/_diag" + if [[ -d "$diag_dir" ]]; then + local latest_diag + latest_diag="$(ls -t "${diag_dir}"/Runner_*.log 2>/dev/null | head -n1 || true)" + if [[ -n "$latest_diag" ]]; then + log "" + log "Latest runner diagnostic (last 5 lines):" + tail -n 5 "$latest_diag" 2>/dev/null || true + fi + fi +} + +do_uninstall_darwin() { + log "Uninstalling self-hosted runner..." + + # Stop service first + unload_service + + # Remove plist + if [[ -f "$PLIST_PATH" ]]; then + rm -f "$PLIST_PATH" + log "Removed plist: $PLIST_PATH" + fi + + # Deregister from GitHub + if [[ -f "${RUNNER_DIR}/config.sh" ]]; then + if [[ -z "$REG_TOKEN" ]]; then + detect_repo + log "" + log "A removal token is required to deregister the runner." 
+ log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners" + log "Or via the API:" + log " curl -X POST -H 'Authorization: token YOUR_PAT' \\" + log " https://api.github.com/repos/${REPO_SLUG}/actions/runners/remove-token" + log "" + printf '[runner] Enter removal token (or press Enter to skip deregistration): ' + read -r REG_TOKEN + fi + + if [[ -n "$REG_TOKEN" ]]; then + "${RUNNER_DIR}/config.sh" remove --token "$REG_TOKEN" || warn "Deregistration failed; you may need to remove the runner manually from GitHub settings." + log "Runner deregistered from GitHub." + else + warn "Skipping deregistration. Remove the runner manually from GitHub settings." + fi + fi + + # Clean up runner directory + if [[ -d "$RUNNER_DIR" ]]; then + log "Removing runner directory: $RUNNER_DIR" + rm -rf "$RUNNER_DIR" + log "Runner directory removed." + fi + + log "Uninstall complete." +} + +# =========================================================================== +# Linux: Docker-based runner via infra/runners/ +# =========================================================================== + +# Ensure Docker and docker compose are available. +ensure_docker() { + require_cmd docker + + # Check for docker compose (v2 plugin or standalone) + if docker compose version >/dev/null 2>&1; then + return + fi + + if command -v docker-compose >/dev/null 2>&1; then + warn "Found docker-compose (standalone). docker compose v2 plugin is recommended." + return + fi + + die "docker compose is required. Install Docker Compose v2: https://docs.docker.com/compose/install/" +} + +# Run docker compose in the infra/runners directory. +# Accepts any docker compose subcommand and arguments. +compose() { + docker compose -f "${INFRA_DIR}/docker-compose.yml" "$@" +} + +do_build_image() { + find_infra_dir + ensure_docker + + local dockerfile_dir="${INFRA_DIR}" + + # Determine the image tag based on whether --push was given. 
+ # With --push: tag includes the registry so docker push knows where to send it. + # Without --push: clean local name. + local image_tag="augur-runner:latest" + if [[ -n "$PUSH_REGISTRY" ]]; then + image_tag="${PUSH_REGISTRY}/augur-runner:latest" + fi + + # Always target linux/amd64 — the Dockerfile hardcodes x86_64 binaries + # (Go linux-amd64, runner agent linux-x64). This ensures correct arch + # even when building on an ARM Mac. + log "Building runner image: ${image_tag} (platform: linux/amd64)" + DOCKER_BUILDKIT=1 docker build --platform linux/amd64 --pull -t "$image_tag" "$dockerfile_dir" + + if [[ -n "$PUSH_REGISTRY" ]]; then + log "Pushing to ${PUSH_REGISTRY}..." + docker push "$image_tag" + log "Image pushed to ${image_tag}" + else + log "Image built locally as ${image_tag}" + log "Use --push to push to a remote registry." + fi +} + +do_setup_linux() { + find_infra_dir + ensure_docker + + log "Docker-based runner setup (infra/runners/)" + log "" + + # Create .env from template if it doesn't exist + if [[ ! -f "${INFRA_DIR}/.env" ]]; then + if [[ -f "${INFRA_DIR}/.env.example" ]]; then + cp "${INFRA_DIR}/.env.example" "${INFRA_DIR}/.env" + log "Created ${INFRA_DIR}/.env from template." + log "Edit this file to set your GITHUB_PAT." + log "" + printf '[runner] Enter your GitHub PAT (or press Enter to edit .env manually later): ' + read -r pat_input + if [[ -n "$pat_input" ]]; then + sed -i "s/^GITHUB_PAT=.*/GITHUB_PAT=${pat_input}/" "${INFRA_DIR}/.env" + log "GITHUB_PAT set in .env" + fi + else + die "Missing .env.example template in ${INFRA_DIR}" + fi + else + log ".env already exists; skipping." + fi + + # Create per-repo env from template if it doesn't exist + if [[ ! -f "${INFRA_DIR}/envs/augur.env" ]]; then + if [[ -f "${INFRA_DIR}/envs/augur.env.example" ]]; then + cp "${INFRA_DIR}/envs/augur.env.example" "${INFRA_DIR}/envs/augur.env" + log "Created ${INFRA_DIR}/envs/augur.env from template." 
+ log "Edit this file to configure REPO_URL, RUNNER_NAME, and resource limits." + else + die "Missing envs/augur.env.example template in ${INFRA_DIR}" + fi + else + log "envs/augur.env already exists; skipping." + fi + + log "" + log "Starting runner..." + compose up -d + + log "" + log "Setup complete. Verify with: ./scripts/runner.sh --mode status" + log "" + log "To activate self-hosted CI, set the repository variable CI_RUNS_ON to:" + log ' ["self-hosted", "Linux", "X64"]' + log "" + log "Via CLI:" + log " gh variable set CI_RUNS_ON --body '[\"self-hosted\", \"Linux\", \"X64\"]'" +} + +do_start_linux() { + find_infra_dir + ensure_docker + + log "Starting Docker runner..." + compose up -d + log "Runner started." +} + +do_stop_linux() { + find_infra_dir + ensure_docker + + log "Stopping Docker runner..." + compose down + log "Runner stopped." +} + +do_status_linux() { + find_infra_dir + ensure_docker + + log "Docker runner status (infra/runners/):" + log "" + compose ps + log "" + log "Recent logs (last 20 lines):" + compose logs --tail 20 2>/dev/null || true +} + +do_uninstall_linux() { + find_infra_dir + ensure_docker + + log "Uninstalling Docker runner..." + compose down -v --rmi local 2>/dev/null || compose down -v + log "Docker runner removed (containers, volumes, local images)." + log "" + log "Note: The runner should auto-deregister from GitHub (ephemeral mode)." 
+ log "If a stale runner remains, remove it manually:" + log " gh api -X DELETE repos/OWNER/REPO/actions/runners/RUNNER_ID" +} + +# =========================================================================== +# Entry point — routes to macOS or Linux implementation +# =========================================================================== + +main() { + parse_args "$@" + + local os + os="$(detect_os)" + + case "$MODE" in + setup) + if [[ "$os" == "darwin" ]]; then do_setup_darwin; else do_setup_linux; fi ;; + start) + if [[ "$os" == "darwin" ]]; then do_start_darwin; else do_start_linux; fi ;; + stop) + if [[ "$os" == "darwin" ]]; then do_stop_darwin; else do_stop_linux; fi ;; + status) + if [[ "$os" == "darwin" ]]; then do_status_darwin; else do_status_linux; fi ;; + build-image) + do_build_image ;; + uninstall) + if [[ "$os" == "darwin" ]]; then do_uninstall_darwin; else do_uninstall_linux; fi ;; + *) + die "unexpected mode: $MODE" ;; + esac +} + +main "$@" diff --git a/runners-conversion/periodVault/actions-local.sh b/runners-conversion/periodVault/actions-local.sh new file mode 100755 index 0000000..fc851a5 --- /dev/null +++ b/runners-conversion/periodVault/actions-local.sh @@ -0,0 +1,210 @@ +#!/usr/bin/env bash +# actions-local.sh +# Local GitHub Actions self-hosted runner lifecycle helper. 
+set -euo pipefail + +RUNNER_DIR="${RUNNER_DIR:-$HOME/.periodvault-actions-runner}" +RUNNER_LABELS="${RUNNER_LABELS:-periodvault}" +RUNNER_NAME="${RUNNER_NAME:-$(hostname)-periodvault}" +RUNNER_WORKDIR="${RUNNER_WORKDIR:-_work}" +RUNNER_PID_FILE="${RUNNER_PID_FILE:-$RUNNER_DIR/.runner.pid}" +RUNNER_LOG_FILE="${RUNNER_LOG_FILE:-$RUNNER_DIR/runner.log}" + +if git config --get remote.origin.url >/dev/null 2>&1; then + ORIGIN_URL="$(git config --get remote.origin.url)" +else + ORIGIN_URL="" +fi + +if [[ -n "$ORIGIN_URL" && "$ORIGIN_URL" =~ ^git@github\.com:(.*)\.git$ ]]; then + RUNNER_URL_DEFAULT="https://github.com/${BASH_REMATCH[1]}" +elif [[ -n "$ORIGIN_URL" && "$ORIGIN_URL" =~ ^https://github\.com/.*$ ]]; then + RUNNER_URL_DEFAULT="${ORIGIN_URL%.git}" +else + RUNNER_URL_DEFAULT="" +fi + +RUNNER_URL="${RUNNER_URL:-$RUNNER_URL_DEFAULT}" + +ACT_WORKFLOW="${ACT_WORKFLOW:-.github/workflows/ci.yml}" +ACT_IMAGE="${ACT_IMAGE:-ghcr.io/catthehacker/ubuntu:act-latest}" +ACT_DOCKER_SOCKET="${ACT_DOCKER_SOCKET:-/Users/s/.colima/augur-actions/docker.sock}" +ACT_DAEMON_SOCKET="${ACT_DAEMON_SOCKET:-/var/run/docker.sock}" +ACT_DOCKER_CONFIG="${ACT_DOCKER_CONFIG:-/tmp/act-docker-config}" + +usage() { + cat < [job-id] + +Environment variables: + RUNNER_DIR Runner installation directory (default: $RUNNER_DIR) + RUNNER_URL GitHub repo/org URL for runner registration + RUNNER_TOKEN Registration/removal token (required for setup/remove) + RUNNER_LABELS Runner labels (default: $RUNNER_LABELS) + RUNNER_NAME Runner name (default: $RUNNER_NAME) + RUNNER_WORKDIR Runner work dir (default: $RUNNER_WORKDIR) + +Local Actions execution (`run`) variables: + ACT_WORKFLOW Workflow file path (default: $ACT_WORKFLOW) + ACT_IMAGE Container image for self-hosted label mapping (default: $ACT_IMAGE) + ACT_DOCKER_SOCKET Docker host socket (default: $ACT_DOCKER_SOCKET) + ACT_DAEMON_SOCKET In-container daemon socket path (default: $ACT_DAEMON_SOCKET) + ACT_DOCKER_CONFIG Docker config dir used by act (default: 
$ACT_DOCKER_CONFIG) +EOF +} + +ensure_runner_binaries() { + if [[ ! -x "$RUNNER_DIR/config.sh" || ! -x "$RUNNER_DIR/run.sh" ]]; then + echo "[actions-local] Missing runner binaries in $RUNNER_DIR." + echo "[actions-local] Download and extract GitHub runner there first." + exit 1 + fi +} + +ensure_runner_url() { + if [[ -z "$RUNNER_URL" ]]; then + echo "[actions-local] RUNNER_URL is empty." + echo "[actions-local] Set RUNNER_URL=https://github.com// and retry." + exit 1 + fi +} + +require_token() { + if [[ -z "${RUNNER_TOKEN:-}" ]]; then + echo "[actions-local] RUNNER_TOKEN is required for this command." + exit 1 + fi +} + +cmd_setup() { + ensure_runner_binaries + ensure_runner_url + require_token + + if [[ -f "$RUNNER_DIR/.runner" ]]; then + echo "[actions-local] Runner already configured in $RUNNER_DIR (idempotent no-op)." + exit 0 + fi + + ( + cd "$RUNNER_DIR" + ./config.sh \ + --unattended \ + --replace \ + --url "$RUNNER_URL" \ + --token "$RUNNER_TOKEN" \ + --name "$RUNNER_NAME" \ + --labels "$RUNNER_LABELS" \ + --work "$RUNNER_WORKDIR" + ) + echo "[actions-local] Runner configured." +} + +cmd_start() { + ensure_runner_binaries + if [[ ! -f "$RUNNER_DIR/.runner" ]]; then + echo "[actions-local] Runner not configured. Run setup first." + exit 1 + fi + + if [[ -f "$RUNNER_PID_FILE" ]] && kill -0 "$(cat "$RUNNER_PID_FILE")" >/dev/null 2>&1; then + echo "[actions-local] Runner already running (pid $(cat "$RUNNER_PID_FILE"))." + exit 0 + fi + + ( + cd "$RUNNER_DIR" + nohup ./run.sh >"$RUNNER_LOG_FILE" 2>&1 & + echo $! >"$RUNNER_PID_FILE" + ) + echo "[actions-local] Runner started (pid $(cat "$RUNNER_PID_FILE"))." + echo "[actions-local] Log: $RUNNER_LOG_FILE" +} + +cmd_stop() { + if [[ ! -f "$RUNNER_PID_FILE" ]]; then + echo "[actions-local] Runner is not running." + exit 0 + fi + + pid="$(cat "$RUNNER_PID_FILE")" + if kill -0 "$pid" >/dev/null 2>&1; then + kill "$pid" + rm -f "$RUNNER_PID_FILE" + echo "[actions-local] Runner stopped (pid $pid)." 
+ else + rm -f "$RUNNER_PID_FILE" + echo "[actions-local] Runner pid file was stale; cleaned up." + fi +} + +cmd_status() { + if [[ -f "$RUNNER_DIR/.runner" ]]; then + echo "[actions-local] configured: yes" + else + echo "[actions-local] configured: no" + fi + + if [[ -f "$RUNNER_PID_FILE" ]] && kill -0 "$(cat "$RUNNER_PID_FILE")" >/dev/null 2>&1; then + echo "[actions-local] running: yes (pid $(cat "$RUNNER_PID_FILE"))" + else + echo "[actions-local] running: no" + fi + + echo "[actions-local] runner-dir: $RUNNER_DIR" + echo "[actions-local] runner-labels: $RUNNER_LABELS" +} + +cmd_remove() { + ensure_runner_binaries + require_token + if [[ ! -f "$RUNNER_DIR/.runner" ]]; then + echo "[actions-local] Runner is not configured." + exit 0 + fi + + ( + cd "$RUNNER_DIR" + ./config.sh remove --token "$RUNNER_TOKEN" + ) + echo "[actions-local] Runner registration removed." +} + +cmd_run() { + local job="${1:-sdd-gate}" + + if ! command -v act >/dev/null 2>&1; then + echo "[actions-local] 'act' is required for local workflow execution." + exit 1 + fi + + mkdir -p "$ACT_DOCKER_CONFIG" + if [[ ! 
-f "$ACT_DOCKER_CONFIG/config.json" ]]; then + printf '{"auths":{}}\n' >"$ACT_DOCKER_CONFIG/config.json" + fi + + DOCKER_CONFIG="$ACT_DOCKER_CONFIG" \ + DOCKER_HOST="unix://$ACT_DOCKER_SOCKET" \ + act -W "$ACT_WORKFLOW" \ + -j "$job" \ + -P "self-hosted=$ACT_IMAGE" \ + -P "macos-latest=$ACT_IMAGE" \ + --container-architecture linux/amd64 \ + --container-daemon-socket "$ACT_DAEMON_SOCKET" +} + +COMMAND="${1:-}" +case "$COMMAND" in + setup) cmd_setup ;; + start) cmd_start ;; + stop) cmd_stop ;; + status) cmd_status ;; + remove) cmd_remove ;; + run) cmd_run "${2:-}" ;; + ""|--help|-h) usage ;; + *) + echo "[actions-local] Unknown command: $COMMAND" + usage + exit 1 + ;; +esac diff --git a/runners-conversion/periodVault/check-process.sh b/runners-conversion/periodVault/check-process.sh new file mode 100755 index 0000000..9ffca6c --- /dev/null +++ b/runners-conversion/periodVault/check-process.sh @@ -0,0 +1,152 @@ +#!/usr/bin/env bash +# check-process.sh +# Process compliance checks for PR branches. +# Validates: no main commits, no .DS_Store, scripts executable, +# spec artifacts exist, iteration counter incremented, commit tags, +# and file-scope allowlist enforcement. +set -euo pipefail + +BASE_REF="${1:-origin/main}" + +if ! git rev-parse --verify "$BASE_REF" >/dev/null 2>&1; then + BASE_REF="HEAD~1" +fi + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +# In GitHub Actions merge refs, HEAD is detached. Derive branch from GITHUB_HEAD_REF +# or from the spec directory that matches changed files. +if [[ "$BRANCH" == "HEAD" ]]; then + if [[ -n "${GITHUB_HEAD_REF:-}" ]]; then + BRANCH="$GITHUB_HEAD_REF" + else + # Fallback: find the spec directory from changed files + for f in "${CHANGED_FILES[@]:-}"; do + if [[ "$f" == specs/*/spec.md ]]; then + BRANCH="${f#specs/}" + BRANCH="${BRANCH%/spec.md}" + break + fi + done + fi +fi +if [[ "$BRANCH" == "main" ]]; then + echo "[check-process] Failing: direct changes on 'main' are not allowed." 
+ exit 1 +fi + +CHANGED_FILES=() +while IFS= read -r line; do + [[ -n "$line" ]] && CHANGED_FILES+=("$line") +done < <(git diff --name-only "$BASE_REF"...HEAD) + +if [[ ${#CHANGED_FILES[@]} -eq 0 ]]; then + echo "[check-process] No changed files relative to $BASE_REF." + exit 0 +fi + +FAILURES=0 + +# --- Check 1: No .DS_Store --- +if command -v rg >/dev/null 2>&1; then + HAS_DS_STORE="$(printf '%s\n' "${CHANGED_FILES[@]}" | rg -q '(^|/)\.DS_Store$' && echo 1 || echo 0)" +else + HAS_DS_STORE="$(printf '%s\n' "${CHANGED_FILES[@]}" | grep -Eq '(^|/)\.DS_Store$' && echo 1 || echo 0)" +fi +if [[ "$HAS_DS_STORE" == "1" ]]; then + echo "[check-process] FAIL: .DS_Store must not be committed." + FAILURES=$((FAILURES + 1)) +fi + +# --- Check 2: Scripts executable --- +for file in "${CHANGED_FILES[@]}"; do + if [[ "$file" == scripts/*.sh ]] && [[ -f "$file" ]] && [[ ! -x "$file" ]]; then + echo "[check-process] FAIL: script is not executable: $file" + FAILURES=$((FAILURES + 1)) + fi +done + +# --- Check 3: Spec artifacts exist --- +SPEC_DIR="specs/${BRANCH}" +if [[ -d "$SPEC_DIR" ]]; then + for artifact in spec.md plan.md tasks.md allowed-files.txt; do + if [[ ! -f "$SPEC_DIR/$artifact" ]]; then + echo "[check-process] FAIL: missing spec artifact: $SPEC_DIR/$artifact" + FAILURES=$((FAILURES + 1)) + fi + done +else + echo "[check-process] FAIL: spec directory not found: $SPEC_DIR" + FAILURES=$((FAILURES + 1)) +fi + +# --- Check 4: ITERATION incremented --- +if [[ -f ITERATION ]]; then + BRANCH_ITER="$(tr -d '[:space:]' < ITERATION)" + BASE_ITER="$(git show "$BASE_REF":ITERATION 2>/dev/null | tr -d '[:space:]' || echo "0")" + if [[ "$BRANCH_ITER" -le "$BASE_ITER" ]] 2>/dev/null; then + echo "[check-process] FAIL: ITERATION ($BRANCH_ITER) must be > base ($BASE_ITER)" + FAILURES=$((FAILURES + 1)) + fi +fi + +# --- Check 5: Commit messages contain [iter N] --- +# Skip merge commits (merge resolution, GitHub merge refs) — they don't carry iter tags. 
COMMITS_WITHOUT_TAG=0
while IFS= read -r msg; do
  # Skip merge commits (start with "Merge " or "merge:")
  if echo "$msg" | grep -qEi '^(Merge |merge:)'; then
    continue
  fi
  if ! echo "$msg" | grep -qE '\[iter [0-9]+\]'; then
    echo "[check-process] FAIL: commit missing [iter N] tag: $msg"
    COMMITS_WITHOUT_TAG=$((COMMITS_WITHOUT_TAG + 1))
  fi
done < <(git log --format='%s' "$BASE_REF"...HEAD)
if [[ $COMMITS_WITHOUT_TAG -gt 0 ]]; then
  FAILURES=$((FAILURES + COMMITS_WITHOUT_TAG))
fi

# --- Check 6: File-scope allowlist ---
ALLOWLIST="$SPEC_DIR/allowed-files.txt"
if [[ -f "$ALLOWLIST" ]]; then
  ALLOWED_PATTERNS=()
  while IFS= read -r line; do
    # Skip comments and blank lines
    line="$(echo "$line" | sed 's/#.*//' | xargs)"
    [[ -z "$line" ]] && continue
    ALLOWED_PATTERNS+=("$line")
  done < "$ALLOWLIST"

  for file in "${CHANGED_FILES[@]}"; do
    MATCHED=false
    for pattern in "${ALLOWED_PATTERNS[@]}"; do
      # Use bash pattern matching (supports * and **)
      local_pattern="${pattern}"
      # shellcheck disable=SC2254
      if [[ "$file" == $local_pattern ]]; then
        MATCHED=true
        break
      fi
      # Also try fnmatch-style: specs/foo/* should match specs/foo/bar.md
      # BUG FIX: pass file/pattern as argv instead of interpolating them into
      # the generated Python source — a quote or backslash in either value
      # broke (and could inject code into) the -c program.
      if command -v python3 >/dev/null 2>&1; then
        if python3 -c 'import fnmatch, sys; sys.exit(0 if fnmatch.fnmatch(sys.argv[1], sys.argv[2]) else 1)' "$file" "$local_pattern" 2>/dev/null; then
          MATCHED=true
          break
        fi
      fi
    done
    if [[ "$MATCHED" == "false" ]]; then
      echo "[check-process] FAIL: file not in allowlist: $file"
      FAILURES=$((FAILURES + 1))
    fi
  done
fi

# --- Result ---
if [[ $FAILURES -gt 0 ]]; then
  echo "[check-process] FAILED ($FAILURES issues)"
  exit 1
fi

echo "[check-process] PASS ($BASE_REF...HEAD)"
diff --git a/runners-conversion/periodVault/ci-local.sh b/runners-conversion/periodVault/ci-local.sh
new file mode 100755
index 0000000..d335777
--- /dev/null
+++ b/runners-conversion/periodVault/ci-local.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash +# ci-local.sh +# Local equivalent of CI checks for self-hosted runner validation. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +cd "$PROJECT_ROOT" + +RUN_CONTRACTS=0 +RUN_BACKEND=0 +RUN_ANDROID=0 +RUN_IOS=0 +SKIP_INSTALL=0 + +usage() { + cat <<'EOF' +Usage: ./scripts/ci-local.sh [options] + +Options: + --contracts Run process/SDD/TDD gate scripts. + --backend Run lint + shared/android unit tests. + --android Run Android emulator UI tests. + --ios Run iOS simulator UI tests. + --all Run contracts + backend + android + ios. + --skip-install Skip setup bootstrap check. + --help Show this help. + +If no test scope flags are provided, defaults to: --contracts --backend +EOF +} + +for arg in "$@"; do + case "$arg" in + --contracts) RUN_CONTRACTS=1 ;; + --backend) RUN_BACKEND=1 ;; + --android) RUN_ANDROID=1 ;; + --ios) RUN_IOS=1 ;; + --all) + RUN_CONTRACTS=1 + RUN_BACKEND=1 + RUN_ANDROID=1 + RUN_IOS=1 + ;; + --skip-install) SKIP_INSTALL=1 ;; + --help|-h) usage; exit 0 ;; + *) + echo "[ci-local] Unknown option: $arg" + usage + exit 1 + ;; + esac +done + +if [[ $RUN_CONTRACTS -eq 0 && $RUN_BACKEND -eq 0 && $RUN_ANDROID -eq 0 && $RUN_IOS -eq 0 ]]; then + RUN_CONTRACTS=1 + RUN_BACKEND=1 +fi + +if [[ $SKIP_INSTALL -eq 0 ]]; then + "$SCRIPT_DIR/setup-dev-environment.sh" --verify +fi + +TIMESTAMP="$(date +%Y%m%d-%H%M%S)" +LOG_DIR="$PROJECT_ROOT/build/local-ci" +LOG_FILE="$LOG_DIR/local-ci-$TIMESTAMP.log" +mkdir -p "$LOG_DIR" + +run_step() { + local step_name="$1" + shift + echo "" + echo "================================================" + echo "[ci-local] $step_name" + echo "================================================" + "$@" 2>&1 | tee -a "$LOG_FILE" +} + +echo "[ci-local] Writing log to $LOG_FILE" +echo "[ci-local] Starting local CI run at $(date -u '+%Y-%m-%dT%H:%M:%SZ')" | tee -a "$LOG_FILE" + +if [[ $RUN_CONTRACTS -eq 1 ]]; then + run_step "check-process" 
"$SCRIPT_DIR/check-process.sh" origin/main + run_step "validate-sdd" "$SCRIPT_DIR/validate-sdd.sh" origin/main + run_step "validate-tdd" env FORCE_AUDIT_GATES=1 "$SCRIPT_DIR/validate-tdd.sh" origin/main +fi + +if [[ $RUN_BACKEND -eq 1 ]]; then + run_step "ktlint+unit-tests" ./gradlew ktlintCheck shared:jvmTest androidApp:testDebugUnitTest +fi + +if [[ $RUN_ANDROID -eq 1 ]]; then + run_step "android-ui-tests" "$SCRIPT_DIR/run-emulator-tests.sh" android +fi + +if [[ $RUN_IOS -eq 1 ]]; then + run_step "ios-ui-tests" "$SCRIPT_DIR/run-emulator-tests.sh" ios +fi + +echo "" +echo "[ci-local] PASS" +echo "[ci-local] Log: $LOG_FILE" diff --git a/runners-conversion/periodVault/fix-android-emulator.sh b/runners-conversion/periodVault/fix-android-emulator.sh new file mode 100755 index 0000000..a84c46e --- /dev/null +++ b/runners-conversion/periodVault/fix-android-emulator.sh @@ -0,0 +1,244 @@ +#!/usr/bin/env bash +# fix-android-emulator.sh — Install Android OS system image and fix/create phone or Wear OS AVD +# Usage: ./scripts/fix-android-emulator.sh +# Run when emulator fails with "No initial system image for this configuration". +# Supports phone (default) and Wear OS emulators. Requires: Android SDK (ANDROID_HOME or +# ~/Library/Android/sdk). Installs SDK command-line tools if missing. +# +# ENV VARs (defaults use latest SDK): +# ANDROID_HOME SDK root (default: $HOME/Library/Android/sdk on macOS) +# ANDROID_SDK_ROOT Same as ANDROID_HOME if set +# ANDROID_EMULATOR_API_LEVEL API level, e.g. 35 or 30 (default: auto = latest from sdkmanager --list) +# ANDROID_AVD_NAME AVD name to fix or create (default: phone, or wear when type=wearos) +# ANDROID_EMULATOR_DEVICE Device profile for new AVDs (default: pixel_8 for phone, wear_os_square for Wear) +# ANDROID_EMULATOR_TYPE phone (default) or wearos — which system image and device profile to use +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +# --- Default ENV VARs: latest SDK --- +# ANDROID_HOME: SDK root (default: $HOME/Library/Android/sdk on macOS) +export ANDROID_HOME="${ANDROID_HOME:-${ANDROID_SDK_ROOT:-}}" +if [[ -z "$ANDROID_HOME" ]]; then + if [[ -d "$HOME/Library/Android/sdk" ]]; then + export ANDROID_HOME="$HOME/Library/Android/sdk" + else + echo -e "${RED}ERROR: ANDROID_HOME not set and ~/Library/Android/sdk not found.${NC}" + echo "Set ANDROID_HOME to your Android SDK root, or install Android Studio / SDK." + exit 1 + fi +fi + +# Emulator type: phone (default) or wearos — determines system image and default device profile +EMULATOR_TYPE="${ANDROID_EMULATOR_TYPE:-phone}" +EMULATOR_TYPE=$(echo "$EMULATOR_TYPE" | tr '[:upper:]' '[:lower:]') +# AVD name and device profile (override with ANDROID_AVD_NAME / ANDROID_EMULATOR_DEVICE) +if [[ "$EMULATOR_TYPE" == "wearos" ]]; then + AVD_NAME="${ANDROID_AVD_NAME:-wear}" + DEVICE_PROFILE="${ANDROID_EMULATOR_DEVICE:-wear_os_square}" +else + AVD_NAME="${ANDROID_AVD_NAME:-phone}" + DEVICE_PROFILE="${ANDROID_EMULATOR_DEVICE:-pixel_8}" +fi + +# --- Find or install SDK command-line tools (sdkmanager, avdmanager) --- +SDKMANAGER="" +AVDMANAGER="" +for d in "$ANDROID_HOME/cmdline-tools/latest/bin" "$ANDROID_HOME/tools/bin"; do + if [[ -x "$d/sdkmanager" ]]; then + SDKMANAGER="$d/sdkmanager" + AVDMANAGER="$d/avdmanager" + break + fi +done +if [[ -z "$SDKMANAGER" ]] && command -v sdkmanager &>/dev/null; then + SDKMANAGER="sdkmanager" + AVDMANAGER="avdmanager" +fi + +install_cmdline_tools() { + echo -e "${YELLOW}Downloading Android SDK command-line tools...${NC}" + local zip_url="https://dl.google.com/android/repository/commandlinetools-mac-11076708_latest.zip" + local zip_file="$PROJECT_ROOT/build/cmdlinetools.zip" + local tmp_dir="$ANDROID_HOME/cmdline-tools" + mkdir -p "$(dirname "$zip_file")" "$tmp_dir" + if ! 
curl -fsSL -o "$zip_file" "$zip_url"; then + echo -e "${RED}Download failed. Install command-line tools manually:${NC}" + echo " Android Studio → Settings → Appearance & Behavior → System Settings → Android SDK" + echo " → SDK Tools tab → check 'Android SDK Command-line Tools (latest)' → Apply" + exit 1 + fi + (cd "$tmp_dir" && unzip -q -o "$zip_file" && mv cmdline-tools latest 2>/dev/null || true) + rm -f "$zip_file" + SDKMANAGER="$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager" + AVDMANAGER="$ANDROID_HOME/cmdline-tools/latest/bin/avdmanager" + if [[ ! -x "$SDKMANAGER" ]]; then + # Some zips unpack to cmdline-tools/ inside the zip + if [[ -d "$tmp_dir/cmdline-tools" ]]; then + mv "$tmp_dir/cmdline-tools" "$tmp_dir/latest" + fi + SDKMANAGER="$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager" + AVDMANAGER="$ANDROID_HOME/cmdline-tools/latest/bin/avdmanager" + fi + if [[ ! -x "$SDKMANAGER" ]]; then + echo -e "${RED}Command-line tools install failed. Install from Android Studio SDK Manager.${NC}" + exit 1 + fi + echo -e "${GREEN}Command-line tools installed.${NC}" +} + +if [[ -z "$SDKMANAGER" ]] || [[ ! -x "$SDKMANAGER" ]]; then + install_cmdline_tools +fi + +# --- Ensure PATH for this script --- +export PATH="$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/emulator:$ANDROID_HOME/platform-tools:$PATH" + +# --- Default to latest SDK system image (when ANDROID_EMULATOR_API_LEVEL unset) --- +# Parses sdkmanager --list for highest API level with Google Play arm64-v8a image. 
+set_latest_system_image() { + local list_output + list_output=$("$SDKMANAGER" --list 2>/dev/null) || true + local best_api=0 + local best_package="" + local pkg api + # Match package lines (path may be first column or whole line): system-images;android-NN;google_apis...;arm64-v8a + while IFS= read -r line; do + pkg=$(echo "$line" | sed -n 's/.*\(system-images;android-[0-9][0-9]*;google_apis[^;]*;arm64-v8a\).*/\1/p') + [[ -z "$pkg" ]] && continue + api=$(echo "$pkg" | sed 's/.*android-\([0-9][0-9]*\).*/\1/') + if [[ "$api" =~ ^[0-9]+$ ]] && [[ "$api" -gt "$best_api" ]]; then + best_api="$api" + best_package="$pkg" + fi + done <<< "$list_output" + if [[ -n "$best_package" ]] && [[ "$best_api" -gt 0 ]]; then + ANDROID_EMULATOR_API_LEVEL="$best_api" + SYSTEM_IMAGE_PACKAGE="$best_package" + echo -e "${GREEN}Using latest SDK system image: API $best_api ($SYSTEM_IMAGE_PACKAGE)${NC}" + fi +} + +# Parses sdkmanager --list for highest API level with Wear OS image. +# Matches: system-images;android-NN;wear;arm64-v8a or ...;google_apis;wear_os_arm64 +set_latest_system_image_wear() { + local list_output + list_output=$("$SDKMANAGER" --list 2>/dev/null) || true + local best_api=0 + local best_package="" + local pkg api + while IFS= read -r line; do + # Must be a system image line containing android-NN and wear (wear; or wear_os) + [[ "$line" != *"system-images"* ]] && continue + [[ "$line" != *"android-"* ]] && continue + [[ "$line" != *"wear"* ]] && continue + # Extract package: system-images;android-NN;... 
(semicolon-separated, may be first column) + pkg=$(echo "$line" | sed -n 's/.*\(system-images;android-[0-9][0-9]*;[^;]*;[^;]*\).*/\1/p') + [[ -z "$pkg" ]] && continue + api=$(echo "$pkg" | sed 's/.*android-\([0-9][0-9]*\).*/\1/') + if [[ "$api" =~ ^[0-9]+$ ]] && [[ "$api" -gt "$best_api" ]]; then + best_api="$api" + best_package="$pkg" + fi + done <<< "$list_output" + if [[ -n "$best_package" ]] && [[ "$best_api" -gt 0 ]]; then + ANDROID_EMULATOR_API_LEVEL="$best_api" + SYSTEM_IMAGE_PACKAGE="$best_package" + echo -e "${GREEN}Using latest Wear OS system image: API $best_api ($SYSTEM_IMAGE_PACKAGE)${NC}" + fi +} + +# If ANDROID_EMULATOR_API_LEVEL not set, detect latest from SDK (phone or Wear OS) +if [[ -z "${ANDROID_EMULATOR_API_LEVEL:-}" ]]; then + if [[ "$EMULATOR_TYPE" == "wearos" ]]; then + set_latest_system_image_wear + else + set_latest_system_image + fi +fi + +# Fallback when detection didn't set a package (e.g. no sdkmanager list) +API_LEVEL="${ANDROID_EMULATOR_API_LEVEL:-35}" +if [[ -z "${SYSTEM_IMAGE_PACKAGE:-}" ]]; then + if [[ "$EMULATOR_TYPE" == "wearos" ]]; then + # Wear OS: images often at API 30; package format android-NN;wear;arm64-v8a + WEAR_API="${ANDROID_EMULATOR_API_LEVEL:-30}" + SYSTEM_IMAGE_PACKAGE="system-images;android-${WEAR_API};wear;arm64-v8a" + API_LEVEL="$WEAR_API" + elif [[ "$API_LEVEL" == "36" ]]; then + SYSTEM_IMAGE_PACKAGE="system-images;android-36;google_apis_playstore_ps16k;arm64-v8a" + else + SYSTEM_IMAGE_PACKAGE="system-images;android-${API_LEVEL};google_apis_playstore;arm64-v8a" + fi +fi + +# --- Accept licenses (non-interactive) --- +echo -e "${YELLOW}Accepting SDK licenses...${NC}" +yes 2>/dev/null | "$SDKMANAGER" --licenses >/dev/null 2>&1 || true + +# --- Install system image --- +echo -e "${YELLOW}Installing system image: $SYSTEM_IMAGE_PACKAGE${NC}" +if ! "$SDKMANAGER" "$SYSTEM_IMAGE_PACKAGE"; then + echo -e "${RED}Failed to install system image. 
Try a different API level:${NC}" + echo " ANDROID_EMULATOR_API_LEVEL=34 $0" + exit 1 +fi + +# --- Verify image has system.img (path from package: a;b;c;d -> a/b/c/d) --- +REL_IMAGE_DIR=$(echo "$SYSTEM_IMAGE_PACKAGE" | sed 's/;/\//g') +IMAGE_DIR="$ANDROID_HOME/$REL_IMAGE_DIR" +if [[ ! -f "$IMAGE_DIR/system.img" ]]; then + echo -e "${RED}Installed image missing system.img at $IMAGE_DIR${NC}" + exit 1 +fi +echo -e "${GREEN}System image OK: $IMAGE_DIR${NC}" + +# --- Resolve AVD directory (phone may point to e.g. Pixel_9_Pro.avd via .ini) --- +AVD_INI="$HOME/.android/avd/${AVD_NAME}.ini" +AVD_DIR="" +if [[ -f "$AVD_INI" ]]; then + AVD_PATH=$(grep "^path=" "$AVD_INI" 2>/dev/null | cut -d= -f2-) + if [[ -n "$AVD_PATH" ]] && [[ -d "$AVD_PATH" ]]; then + AVD_DIR="$AVD_PATH" + fi +fi +if [[ -z "$AVD_DIR" ]]; then + AVD_DIR="$HOME/.android/avd/${AVD_NAME}.avd" +fi + +# Update existing AVD config to use the working system image +if [[ -d "$AVD_DIR" ]] && [[ -f "$AVD_DIR/config.ini" ]] && [[ -f "$IMAGE_DIR/system.img" ]]; then + CONFIG="$AVD_DIR/config.ini" + if grep -q "image.sysdir" "$CONFIG"; then + # Portable sed: write to temp then mv (macOS sed -i needs backup arg) + sed "s|image.sysdir.1=.*|image.sysdir.1=$REL_IMAGE_DIR/|" "$CONFIG" > "${CONFIG}.tmp" + mv "${CONFIG}.tmp" "$CONFIG" + echo -e "${GREEN}Updated AVD config to use $REL_IMAGE_DIR${NC}" + fi +elif [[ ! -d "$AVD_DIR" ]]; then + echo -e "${YELLOW}Creating AVD '$AVD_NAME' with device profile $DEVICE_PROFILE...${NC}" + echo no | "$AVDMANAGER" create avd \ + -n "$AVD_NAME" \ + -k "$SYSTEM_IMAGE_PACKAGE" \ + -d "$DEVICE_PROFILE" \ + --force + echo -e "${GREEN}AVD '$AVD_NAME' created.${NC}" +fi + +echo "" +echo -e "${GREEN}Done. 
Start the emulator with:${NC}" +echo " emulator -avd $AVD_NAME" +echo "" +if [[ "$EMULATOR_TYPE" == "wearos" ]]; then + echo "Fix Wear OS only: ANDROID_EMULATOR_TYPE=wearos $0" + echo "Or fix both phone and Wear: $0 && ANDROID_EMULATOR_TYPE=wearos $0" +else + echo "Or run deploy: ./scripts/deploy-emulator.sh android" +fi +echo "" diff --git a/runners-conversion/periodVault/init-audit.sh b/runners-conversion/periodVault/init-audit.sh new file mode 100755 index 0000000..93661c7 --- /dev/null +++ b/runners-conversion/periodVault/init-audit.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# init-audit.sh +# Initializes local audit scaffolding used by process gates. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +AUDIT_DIR="$PROJECT_ROOT/audit" + +mkdir -p "$AUDIT_DIR" + +if [[ ! -f "$AUDIT_DIR/requirements.json" ]]; then + cat >"$AUDIT_DIR/requirements.json" <<'JSON' +{ + "version": 1, + "lastUpdated": "2026-02-21", + "requirements": [ + { + "id": "R-CI-SELF-HOSTED", + "description": "CI jobs run on self-hosted runner labels with documented fallback." + }, + { + "id": "R-DEV-SETUP", + "description": "Repository provides idempotent bootstrap script and verification commands." + }, + { + "id": "R-DEV-GUIDE", + "description": "Developer guide is aligned with README, scripts, and local workflow." + } + ] +} +JSON + echo "[init-audit] Created audit/requirements.json" +else + echo "[init-audit] Found audit/requirements.json" +fi + +if [[ ! -f "$AUDIT_DIR/test-runs.json" ]]; then + cat >"$AUDIT_DIR/test-runs.json" <<'JSON' +{ + "version": 1, + "runs": [] +} +JSON + echo "[init-audit] Created audit/test-runs.json" +else + echo "[init-audit] Found audit/test-runs.json" +fi + +if [[ ! 
-f "$PROJECT_ROOT/CODEX-REPORT.md" ]]; then + cat >"$PROJECT_ROOT/CODEX-REPORT.md" <<'MD' +# CODEX Report + +## Requirements Mapping +- R-CI-SELF-HOSTED: pending +- R-DEV-SETUP: pending +- R-DEV-GUIDE: pending + +## Constitution Compliance Matrix +| Principle | Status | Notes | +|-----------|--------|-------| +| I | pending | | +| X | pending | | +| XX | pending | | + +## Evidence +- Add command outputs and CI links. + +## Risks +- Add known risks and mitigations. +MD + echo "[init-audit] Created CODEX-REPORT.md template" +else + echo "[init-audit] Found CODEX-REPORT.md" +fi + +echo "[init-audit] Audit scaffolding ready." diff --git a/runners-conversion/periodVault/monitor-pr-checks.sh b/runners-conversion/periodVault/monitor-pr-checks.sh new file mode 100755 index 0000000..9749c7e --- /dev/null +++ b/runners-conversion/periodVault/monitor-pr-checks.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: scripts/monitor-pr-checks.sh + +Environment overrides: + CHECK_FAST_INTERVAL_SECONDS default: 60 + CHECK_SLOW_INTERVAL_SECONDS default: 180 + CHECK_MIN_FAST_WINDOW_SECONDS default: 900 + CHECK_STABLE_CYCLES_FOR_SLOW default: 5 +EOF +} + +if [[ "${1:-}" == "-h" ]] || [[ "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +PR_NUMBER="${1:-}" +if [[ -z "$PR_NUMBER" ]]; then + usage >&2 + exit 2 +fi + +FAST_INTERVAL_SECONDS="${CHECK_FAST_INTERVAL_SECONDS:-60}" +SLOW_INTERVAL_SECONDS="${CHECK_SLOW_INTERVAL_SECONDS:-180}" +MIN_FAST_WINDOW_SECONDS="${CHECK_MIN_FAST_WINDOW_SECONDS:-900}" +STABLE_CYCLES_FOR_SLOW="${CHECK_STABLE_CYCLES_FOR_SLOW:-5}" + +start_ts="$(date +%s)" +stable_cycles=0 +last_fingerprint="" +err_file="$(mktemp)" +trap 'rm -f "$err_file"' EXIT + +echo "Monitoring PR #${PR_NUMBER} checks" +echo "Policy: fast=${FAST_INTERVAL_SECONDS}s, slow=${SLOW_INTERVAL_SECONDS}s, min-fast-window=${MIN_FAST_WINDOW_SECONDS}s, stable-cycles-for-slow=${STABLE_CYCLES_FOR_SLOW}" + +while true; do + now_ts="$(date +%s)" + 
elapsed="$((now_ts - start_ts))" + elapsed_mm="$((elapsed / 60))" + elapsed_ss="$((elapsed % 60))" + + if ! checks_json="$(gh pr checks "$PR_NUMBER" --json name,state,link 2>"$err_file")"; then + err_msg="$(tr '\n' ' ' <"$err_file" | sed 's/[[:space:]]\+/ /g; s/^ //; s/ $//')" + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] elapsed ${elapsed_mm}m${elapsed_ss}s | check query failed: ${err_msg:-unknown error}" + sleep "$FAST_INTERVAL_SECONDS" + continue + fi + if [[ "$checks_json" == "[]" ]]; then + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] elapsed ${elapsed_mm}m${elapsed_ss}s | no checks yet" + sleep "$FAST_INTERVAL_SECONDS" + continue + fi + + success_count="$(jq '[.[] | select(.state=="SUCCESS")] | length' <<<"$checks_json")" + failure_count="$(jq '[.[] | select(.state=="FAILURE" or .state=="ERROR" or .state=="STARTUP_FAILURE" or .state=="TIMED_OUT")] | length' <<<"$checks_json")" + cancelled_count="$(jq '[.[] | select(.state=="CANCELLED")] | length' <<<"$checks_json")" + skipped_count="$(jq '[.[] | select(.state=="SKIPPED" or .state=="NEUTRAL")] | length' <<<"$checks_json")" + active_count="$(jq '[.[] | select(.state=="PENDING" or .state=="QUEUED" or .state=="IN_PROGRESS" or .state=="WAITING" or .state=="REQUESTED")] | length' <<<"$checks_json")" + total_count="$(jq 'length' <<<"$checks_json")" + + fingerprint="$(jq -r 'sort_by(.name) | map("\(.name)=\(.state)") | join(";")' <<<"$checks_json")" + if [[ "$fingerprint" == "$last_fingerprint" ]]; then + stable_cycles="$((stable_cycles + 1))" + else + stable_cycles=0 + last_fingerprint="$fingerprint" + fi + + echo "[$(date -u '+%Y-%m-%dT%H:%M:%SZ')] elapsed ${elapsed_mm}m${elapsed_ss}s | total=${total_count} success=${success_count} skipped=${skipped_count} active=${active_count} failed=${failure_count} cancelled=${cancelled_count}" + + if [[ "$failure_count" -gt 0 ]]; then + echo "Failing checks:" + jq -r '.[] | select(.state=="FAILURE" or .state=="ERROR" or .state=="STARTUP_FAILURE" or .state=="TIMED_OUT") | " - 
\(.name): \(.state) \(.link)"' <<<"$checks_json" + exit 1 + fi + + if [[ "$active_count" -eq 0 ]]; then + if [[ "$cancelled_count" -gt 0 ]]; then + echo "Checks ended with cancellations." + jq -r '.[] | select(.state=="CANCELLED") | " - \(.name): \(.link)"' <<<"$checks_json" + exit 1 + fi + if [[ "$((success_count + skipped_count))" -eq "$total_count" ]]; then + echo "All checks passed." + exit 0 + fi + echo "Checks finished with non-success states." + jq -r '.[] | " - \(.name): \(.state) \(.link)"' <<<"$checks_json" + exit 1 + fi + + if (( elapsed < MIN_FAST_WINDOW_SECONDS )); then + sleep "$FAST_INTERVAL_SECONDS" + continue + fi + + if (( stable_cycles >= STABLE_CYCLES_FOR_SLOW )); then + sleep "$SLOW_INTERVAL_SECONDS" + else + sleep "$FAST_INTERVAL_SECONDS" + fi +done diff --git a/runners-conversion/periodVault/run-emulator-tests.sh b/runners-conversion/periodVault/run-emulator-tests.sh new file mode 100755 index 0000000..3f52292 --- /dev/null +++ b/runners-conversion/periodVault/run-emulator-tests.sh @@ -0,0 +1,538 @@ +#!/usr/bin/env bash +# run-emulator-tests.sh — Run all emulator/simulator UI tests for PeriodVault +# Usage: ./scripts/run-emulator-tests.sh [android|ios|all] +# Logs to build/emulator-tests.log; script reads the log to detect adb errors (e.g. multiple devices). +# +# iOS watchdog env controls: +# IOS_HEARTBEAT_SECONDS (default: 30) +# IOS_STARTUP_PROGRESS_TIMEOUT_SECONDS (default: 900) +# IOS_TEST_STALL_TIMEOUT_SECONDS (default: 480) +# IOS_UNRESPONSIVE_STALL_TIMEOUT_SECONDS(default: 120) +# IOS_HARD_TIMEOUT_SECONDS (default: 10800) +# IOS_ACTIVE_CPU_THRESHOLD (default: 1.0) +set -euo pipefail + +PLATFORM="${1:-all}" +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +cd "$PROJECT_ROOT" + +# shellcheck source=scripts/lib.sh +source "$SCRIPT_DIR/lib.sh" +ensure_log_file "emulator-tests.log" + +# Start Android emulator headless for test runs (no GUI window needed) +export EMULATOR_HEADLESS=1 + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +ANDROID_PASS=0 +IOS_PASS=0 +ANDROID_FAIL=0 +IOS_FAIL=0 + +run_android() { + echo -e "${YELLOW}=== Android Emulator Tests ===${NC}" + + if ! ensure_android_emulator; then + echo -e "${RED}ERROR: Could not start or connect to Android emulator. See $LOG_FILE${NC}" + ANDROID_FAIL=1 + return 1 + fi + + # Disable animations for stable UI tests + run_and_log "adb_disable_animations" adb shell "settings put global window_animation_scale 0; settings put global transition_animation_scale 0; settings put global animator_duration_scale 0" || true + + # Pre-flight: verify emulator is responsive via adb shell + echo "Verifying Android emulator is responsive..." + if ! adb shell getprop sys.boot_completed 2>/dev/null | grep -q "1"; then + echo -e "${RED}ERROR: Android emulator not responsive (sys.boot_completed != 1). Aborting.${NC}" + ANDROID_FAIL=1 + return 1 + fi + echo "Android emulator is responsive." + + # Uninstall the app to ensure a clean database for tests + echo "Cleaning app data..." + adb uninstall periodvault.androidApp 2>/dev/null || true + adb uninstall periodvault.androidApp.test 2>/dev/null || true + + echo "Running Android instrumented tests..." + local GRADLE_PID + local GRADLE_EXIT=0 + local TOTAL_ANDROID_TESTS=0 + TOTAL_ANDROID_TESTS=$(find androidApp/src/androidTest -name '*.kt' -type f -exec grep -hE '@Test' {} + 2>/dev/null | wc -l | tr -d ' ') + if [[ -z "$TOTAL_ANDROID_TESTS" ]]; then + TOTAL_ANDROID_TESTS=0 + fi + ./gradlew androidApp:connectedDebugAndroidTest 2>&1 & + GRADLE_PID=$! 
+ + # Progress/liveness watchdog: + # - emits heartbeat every 30s with completed Android test cases and emulator health + # - kills early only if emulator is unresponsive and test progress is stalled for 10m + # - retains a generous hard timeout as last-resort safety net + local HEARTBEAT_SECONDS=30 + local UNRESPONSIVE_STALL_TIMEOUT_SECONDS=600 + local HARD_TIMEOUT_SECONDS=7200 # 2 hours + ( + local start_ts now_ts elapsed + local last_progress_ts + local completed=0 + local last_completed=0 + local stale_seconds=0 + local emu_health="" + + start_ts=$(date +%s) + last_progress_ts=$start_ts + + while kill -0 $GRADLE_PID 2>/dev/null; do + sleep "$HEARTBEAT_SECONDS" + now_ts=$(date +%s) + elapsed=$((now_ts - start_ts)) + + completed=$(find androidApp/build/outputs/androidTest-results/connected -name '*.xml' -type f -exec grep -ho "/dev/null | wc -l | tr -d ' ') + if [[ -z "$completed" ]]; then + completed=0 + fi + + if [[ "$completed" -gt "$last_completed" ]]; then + last_progress_ts=$now_ts + last_completed=$completed + fi + + if adb shell getprop sys.boot_completed 2>/dev/null | grep -q "1"; then + emu_health="responsive" + else + emu_health="UNRESPONSIVE" + fi + + stale_seconds=$((now_ts - last_progress_ts)) + local elapsed_mm elapsed_ss + elapsed_mm=$((elapsed / 60)) + elapsed_ss=$((elapsed % 60)) + + if [[ "$TOTAL_ANDROID_TESTS" -gt 0 ]]; then + echo "Android progress: ${completed}/${TOTAL_ANDROID_TESTS} tests complete | elapsed ${elapsed_mm}m${elapsed_ss}s | emulator ${emu_health}" + else + echo "Android progress: ${completed} tests complete | elapsed ${elapsed_mm}m${elapsed_ss}s | emulator ${emu_health}" + fi + + if [[ "$elapsed" -ge "$HARD_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing Gradle (PID $GRADLE_PID) after hard timeout ${HARD_TIMEOUT_SECONDS}s" + kill $GRADLE_PID 2>/dev/null || true + sleep 5 + kill -9 $GRADLE_PID 2>/dev/null || true + break + fi + + if [[ "$emu_health" == "UNRESPONSIVE" ]] && [[ "$stale_seconds" -ge 
"$UNRESPONSIVE_STALL_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing Gradle (PID $GRADLE_PID) - emulator unresponsive and no progress for ${stale_seconds}s" + kill $GRADLE_PID 2>/dev/null || true + sleep 5 + kill -9 $GRADLE_PID 2>/dev/null || true + break + fi + done + ) & + local WATCHDOG_PID=$! + wait $GRADLE_PID 2>/dev/null || GRADLE_EXIT=$? + kill $WATCHDOG_PID 2>/dev/null || true + wait $WATCHDOG_PID 2>/dev/null || true + + if [[ $GRADLE_EXIT -eq 137 ]] || [[ $GRADLE_EXIT -eq 143 ]]; then + echo -e "${RED}Android emulator tests terminated by watchdog${NC}" + ANDROID_FAIL=1 + run_and_log "adb_restore_animations" adb shell "settings put global window_animation_scale 1; settings put global transition_animation_scale 1; settings put global animator_duration_scale 1" || true + return 1 + elif [[ $GRADLE_EXIT -eq 0 ]]; then + echo -e "${GREEN}Android emulator tests PASSED${NC}" + ANDROID_PASS=1 + # Emit runtime evidence for CI tracking + local android_duration_s="" + local android_test_count="" + if [[ -f androidApp/build/reports/androidTests/connected/debug/index.html ]]; then + android_test_count="$(grep -o '
[0-9]*
' androidApp/build/reports/androidTests/connected/debug/index.html | head -1 | grep -o '[0-9]*' || echo "")" + android_duration_s="$(grep -o '
[0-9a-z.]*s
' androidApp/build/reports/androidTests/connected/debug/index.html | head -1 | grep -o '[0-9.]*' || echo "")" + fi + echo "RUNTIME_EVIDENCE: {\"suite\": \"android_ui\", \"tests\": ${android_test_count:-0}, \"duration\": \"${android_duration_s:-unknown}s\", \"timestamp\": \"$(date -u '+%Y-%m-%dT%H:%M:%SZ')\"}" + else + echo -e "${RED}Android emulator tests FAILED${NC}" + ANDROID_FAIL=1 + echo "Test reports: androidApp/build/reports/androidTests/connected/debug/" + run_and_log "adb_restore_animations" adb shell "settings put global window_animation_scale 1; settings put global transition_animation_scale 1; settings put global animator_duration_scale 1" || true + return 1 + fi + + # Re-enable animations + run_and_log "adb_restore_animations" adb shell "settings put global window_animation_scale 1; settings put global transition_animation_scale 1; settings put global animator_duration_scale 1" || true +} + +run_ios() { + echo -e "${YELLOW}=== iOS Simulator Tests ===${NC}" + + # Find an available simulator + local SIM_ID + SIM_ID=$(xcrun simctl list devices available -j 2>/dev/null | python3 -c " +import json, sys +data = json.load(sys.stdin) +for runtime, devices in data.get('devices', {}).items(): + if 'iOS' in runtime: + for d in devices: + if d.get('isAvailable'): + print(d['udid']) + sys.exit(0) +sys.exit(1) +" 2>/dev/null) || true + + if [[ -z "$SIM_ID" ]]; then + echo -e "${RED}ERROR: No available iOS simulator found.${NC}" + IOS_FAIL=1 + return 1 + fi + + local SIM_NAME + SIM_NAME=$(xcrun simctl list devices available | grep "$SIM_ID" | sed 's/ (.*//' | xargs) + echo "Using simulator: $SIM_NAME ($SIM_ID)" + + # Boot simulator if needed + xcrun simctl boot "$SIM_ID" 2>/dev/null || true + + # Health check: verify simulator is actually responsive (not just "Booted" in simctl) + echo "Verifying simulator is responsive..." 
+ local HEALTH_OK=false + for i in 1 2 3 4 5; do + if xcrun simctl spawn "$SIM_ID" launchctl print system >/dev/null 2>&1; then + HEALTH_OK=true + break + fi + echo " Attempt $i/5: simulator not responsive, waiting 5s..." + sleep 5 + done + if [[ "$HEALTH_OK" != "true" ]]; then + echo -e "${RED}ERROR: Simulator $SIM_NAME ($SIM_ID) reports Booted but is not responsive.${NC}" + echo "Attempting full restart..." + xcrun simctl shutdown "$SIM_ID" 2>/dev/null || true + sleep 3 + xcrun simctl boot "$SIM_ID" 2>/dev/null || true + sleep 10 + if ! xcrun simctl spawn "$SIM_ID" launchctl print system >/dev/null 2>&1; then + echo -e "${RED}ERROR: Simulator still unresponsive after restart. Aborting.${NC}" + IOS_FAIL=1 + return 1 + fi + echo "Simulator recovered after restart." + fi + echo "Simulator is responsive." + + # Generate Xcode project if needed + if [[ ! -f iosApp/iosApp.xcodeproj/project.pbxproj ]]; then + echo "Generating Xcode project..." + (cd iosApp && xcodegen generate) + fi + + # --- Phase 1: Build (synchronous, fail-fast) --- + echo "Building iOS UI tests..." + local BUILD_DIR + BUILD_DIR=$(mktemp -d) + local BUILD_LOG + BUILD_LOG=$(mktemp) + local BUILD_START + BUILD_START=$(date +%s) + + xcodebuild build-for-testing \ + -project iosApp/iosApp.xcodeproj \ + -scheme iosApp \ + -destination "platform=iOS Simulator,id=$SIM_ID" \ + -derivedDataPath "$BUILD_DIR" \ + > "$BUILD_LOG" 2>&1 + + local BUILD_EXIT=$? + local BUILD_END + BUILD_END=$(date +%s) + echo "iOS build phase: $((BUILD_END - BUILD_START))s (exit=$BUILD_EXIT)" + + if [[ $BUILD_EXIT -ne 0 ]]; then + echo -e "${RED}BUILD FAILED — last 30 lines:${NC}" + tail -30 "$BUILD_LOG" + rm -f "$BUILD_LOG" + rm -rf "$BUILD_DIR" + IOS_FAIL=1 + return 1 + fi + rm -f "$BUILD_LOG" + + # Disable animations for stable, faster UI tests + echo "Disabling simulator animations..." 
+ xcrun simctl spawn "$SIM_ID" defaults write com.apple.Accessibility ReduceMotionEnabled -bool YES 2>/dev/null || true + + # Uninstall the app to ensure a clean database for tests + echo "Cleaning app data..." + xcrun simctl uninstall "$SIM_ID" com.periodvault.app 2>/dev/null || true + + # --- Phase 2: Test (background with watchdog, parallel execution) --- + echo "Running iOS UI tests (parallel enabled)..." + local TEST_EXIT=0 + local TEST_LOG + TEST_LOG=$(mktemp) + local RESULT_BUNDLE_DIR + RESULT_BUNDLE_DIR=$(mktemp -d) + local RESULT_BUNDLE_PATH="$RESULT_BUNDLE_DIR/ios-ui-tests.xcresult" + local TOTAL_IOS_TESTS=0 + TOTAL_IOS_TESTS=$(find iosApp/iosAppUITests -name '*.swift' -print0 2>/dev/null | xargs -0 grep -hE '^[[:space:]]*func[[:space:]]+test' 2>/dev/null | wc -l | tr -d ' ') + if [[ -z "$TOTAL_IOS_TESTS" ]]; then + TOTAL_IOS_TESTS=0 + fi + local TEST_START + TEST_START=$(date +%s) + + xcodebuild test-without-building \ + -project iosApp/iosApp.xcodeproj \ + -scheme iosApp \ + -destination "platform=iOS Simulator,id=$SIM_ID" \ + -only-testing:iosAppUITests \ + -derivedDataPath "$BUILD_DIR" \ + -resultBundlePath "$RESULT_BUNDLE_PATH" \ + -parallel-testing-enabled YES \ + > "$TEST_LOG" 2>&1 & + local XCODE_PID=$! 
+ + # Progress/liveness watchdog: + # - emits heartbeat with completed test count and simulator health + # - fails fast when CoreSimulatorService is unhealthy + # - treats test completion, xcodebuild CPU, and log growth as activity + # - fails when startup/test activity stalls beyond configured thresholds + # - keeps a hard cap as a final safety net + local HEARTBEAT_SECONDS="${IOS_HEARTBEAT_SECONDS:-30}" + local STARTUP_PROGRESS_TIMEOUT_SECONDS="${IOS_STARTUP_PROGRESS_TIMEOUT_SECONDS:-900}" + local TEST_STALL_TIMEOUT_SECONDS="${IOS_TEST_STALL_TIMEOUT_SECONDS:-480}" + local UNRESPONSIVE_STALL_TIMEOUT_SECONDS="${IOS_UNRESPONSIVE_STALL_TIMEOUT_SECONDS:-120}" + local HARD_TIMEOUT_SECONDS="${IOS_HARD_TIMEOUT_SECONDS:-10800}" # 3 hours + local ACTIVE_CPU_THRESHOLD="${IOS_ACTIVE_CPU_THRESHOLD:-1.0}" + + echo "iOS watchdog: heartbeat=${HEARTBEAT_SECONDS}s startup_timeout=${STARTUP_PROGRESS_TIMEOUT_SECONDS}s test_stall_timeout=${TEST_STALL_TIMEOUT_SECONDS}s unresponsive_timeout=${UNRESPONSIVE_STALL_TIMEOUT_SECONDS}s hard_timeout=${HARD_TIMEOUT_SECONDS}s cpu_active_threshold=${ACTIVE_CPU_THRESHOLD}%" + ( + local start_ts now_ts elapsed + local last_test_progress_ts + local last_activity_ts + local completed=0 + local last_completed=0 + local stale_seconds=0 + local sim_health="" + local first_test_seen=false + local simctl_health_output="" + local log_size=0 + local last_log_size=0 + local xcode_cpu="0.0" + local xcode_cpu_raw="" + + start_ts=$(date +%s) + last_test_progress_ts=$start_ts + last_activity_ts=$start_ts + + while kill -0 $XCODE_PID 2>/dev/null; do + sleep "$HEARTBEAT_SECONDS" + now_ts=$(date +%s) + elapsed=$((now_ts - start_ts)) + + # Keep watchdog alive before first completed test appears; do not fail on zero matches. 
+ completed=$(grep -E -c "Test [Cc]ase .* (passed|failed)" "$TEST_LOG" 2>/dev/null || true) + if [[ -z "$completed" ]]; then + completed=0 + fi + + if [[ "$completed" -gt "$last_completed" ]]; then + last_test_progress_ts=$now_ts + last_activity_ts=$now_ts + last_completed=$completed + first_test_seen=true + fi + + # xcodebuild output growth indicates ongoing work even when a test has not completed yet. + log_size=$(wc -c < "$TEST_LOG" 2>/dev/null || echo 0) + if [[ -n "$log_size" ]] && [[ "$log_size" -gt "$last_log_size" ]]; then + last_log_size=$log_size + last_activity_ts=$now_ts + fi + + # CPU usage provides another liveness signal during long-running UI tests. + xcode_cpu_raw=$(ps -p "$XCODE_PID" -o %cpu= 2>/dev/null | tr -d ' ' || true) + if [[ -n "$xcode_cpu_raw" ]]; then + xcode_cpu="$xcode_cpu_raw" + else + xcode_cpu="0.0" + fi + if awk "BEGIN { exit !($xcode_cpu >= $ACTIVE_CPU_THRESHOLD) }"; then + last_activity_ts=$now_ts + fi + + if simctl_health_output=$(xcrun simctl spawn "$SIM_ID" launchctl print system 2>&1); then + sim_health="responsive" + else + sim_health="UNRESPONSIVE" + + # Fail fast when the simulator service itself is down. Waiting longer does not recover this state. 
+ if echo "$simctl_health_output" | grep -Eiq "CoreSimulatorService connection became invalid|not connected to CoreSimulatorService|Unable to locate device set|Connection refused|simdiskimaged.*(crashed|not responding)|Unable to discover any Simulator runtimes"; then + echo "WATCHDOG: CoreSimulatorService unhealthy; killing xcodebuild (PID $XCODE_PID) immediately" + echo "$simctl_health_output" | head -5 | sed 's/^/ simctl: /' + kill $XCODE_PID 2>/dev/null || true + sleep 5 + kill -9 $XCODE_PID 2>/dev/null || true + break + fi + fi + + stale_seconds=$((now_ts - last_activity_ts)) + local elapsed_mm elapsed_ss + elapsed_mm=$((elapsed / 60)) + elapsed_ss=$((elapsed % 60)) + + if [[ "$TOTAL_IOS_TESTS" -gt 0 ]]; then + echo "iOS progress: ${completed}/${TOTAL_IOS_TESTS} tests complete | elapsed ${elapsed_mm}m${elapsed_ss}s | simulator ${sim_health} | xcodebuild cpu ${xcode_cpu}%" + else + echo "iOS progress: ${completed} tests complete | elapsed ${elapsed_mm}m${elapsed_ss}s | simulator ${sim_health} | xcodebuild cpu ${xcode_cpu}%" + fi + + if [[ "$elapsed" -ge "$HARD_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing xcodebuild (PID $XCODE_PID) after hard timeout ${HARD_TIMEOUT_SECONDS}s" + kill $XCODE_PID 2>/dev/null || true + sleep 5 + kill -9 $XCODE_PID 2>/dev/null || true + break + fi + + if [[ "$first_test_seen" != "true" ]] && [[ "$elapsed" -ge "$STARTUP_PROGRESS_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing xcodebuild (PID $XCODE_PID) - no completed iOS test observed within startup timeout (${STARTUP_PROGRESS_TIMEOUT_SECONDS}s)" + kill $XCODE_PID 2>/dev/null || true + sleep 5 + kill -9 $XCODE_PID 2>/dev/null || true + break + fi + + if [[ "$first_test_seen" == "true" ]] && [[ "$stale_seconds" -ge "$TEST_STALL_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing xcodebuild (PID $XCODE_PID) - no iOS test activity for ${stale_seconds}s" + kill $XCODE_PID 2>/dev/null || true + sleep 5 + kill -9 $XCODE_PID 2>/dev/null || true + break + fi + + if [[ 
"$sim_health" == "UNRESPONSIVE" ]] && [[ "$stale_seconds" -ge "$UNRESPONSIVE_STALL_TIMEOUT_SECONDS" ]]; then + echo "WATCHDOG: killing xcodebuild (PID $XCODE_PID) - simulator unresponsive and no test activity for ${stale_seconds}s" + kill $XCODE_PID 2>/dev/null || true + sleep 5 + kill -9 $XCODE_PID 2>/dev/null || true + break + fi + done + ) & + local WATCHDOG_PID=$! + + wait $XCODE_PID 2>/dev/null || TEST_EXIT=$? + kill $WATCHDOG_PID 2>/dev/null || true + wait $WATCHDOG_PID 2>/dev/null || true + + local TEST_END + TEST_END=$(date +%s) + echo "iOS test phase: $((TEST_END - TEST_START))s (exit=$TEST_EXIT)" + echo "iOS total (build+test): $((TEST_END - BUILD_START))s" + + # Show test summary (passed/failed counts and any failures) + echo "--- Test Results ---" + grep -E "Test [Cc]ase .* (passed|failed)" "$TEST_LOG" || true + echo "" + echo "--- Failures ---" + grep -E "(FAIL|error:|\*\* TEST FAILED)" "$TEST_LOG" || echo " (none)" + echo "" + echo "--- Last 20 lines ---" + tail -20 "$TEST_LOG" + rm -f "$TEST_LOG" + + if [[ $TEST_EXIT -eq 0 ]]; then + local SKIP_ALLOWLIST="${IOS_SKIPPED_TESTS_ALLOWLIST:-audit/ios-skipped-tests-allowlist.txt}" + if ! 
bash "$SCRIPT_DIR/validate-ios-skipped-tests.sh" "$RESULT_BUNDLE_PATH" "$SKIP_ALLOWLIST"; then + echo -e "${RED}iOS skipped-test gate FAILED${NC}" + TEST_EXIT=1 + fi + fi + + rm -rf "$RESULT_BUNDLE_DIR" + rm -rf "$BUILD_DIR" + + # Re-enable animations + xcrun simctl spawn "$SIM_ID" defaults write com.apple.Accessibility ReduceMotionEnabled -bool NO 2>/dev/null || true + + if [[ $TEST_EXIT -eq 137 ]] || [[ $TEST_EXIT -eq 143 ]]; then + echo -e "${RED}iOS simulator tests terminated by watchdog${NC}" + IOS_FAIL=1 + return 1 + elif [[ $TEST_EXIT -eq 0 ]]; then + echo -e "${GREEN}iOS simulator tests PASSED${NC}" + IOS_PASS=1 + # Emit runtime evidence for CI tracking + local ios_test_count="" + ios_test_count="$TOTAL_IOS_TESTS" + local ios_elapsed_s="" + ios_elapsed_s="$(($(date +%s) - $(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$(date -u '+%Y-%m-%dT%H:%M:%SZ')" +%s 2>/dev/null || echo 0)))" + echo "RUNTIME_EVIDENCE: {\"suite\": \"ios_ui\", \"tests\": ${ios_test_count:-0}, \"timestamp\": \"$(date -u '+%Y-%m-%dT%H:%M:%SZ')\"}" + else + echo -e "${RED}iOS simulator tests FAILED${NC}" + IOS_FAIL=1 + return 1 + fi +} + +echo "================================================" +echo " PeriodVault Emulator/Simulator Test Runner" +echo "================================================" +echo "" + +case "$PLATFORM" in + android) + run_android + ;; + ios) + run_ios + ;; + all) + run_android || true + echo "" + run_ios || true + ;; + *) + echo "Usage: $0 [android|ios|all]" + exit 1 + ;; +esac + +echo "" +echo "================================================" +echo " Results Summary" +echo "================================================" +if [[ "$PLATFORM" == "all" || "$PLATFORM" == "android" ]]; then + if [[ $ANDROID_PASS -eq 1 ]]; then + echo -e " Android: ${GREEN}PASSED${NC}" + elif [[ $ANDROID_FAIL -eq 1 ]]; then + echo -e " Android: ${RED}FAILED${NC}" + else + echo -e " Android: ${YELLOW}SKIPPED${NC}" + fi +fi +if [[ "$PLATFORM" == "all" || "$PLATFORM" == "ios" ]]; then + if [[ 
$IOS_PASS -eq 1 ]]; then + echo -e " iOS: ${GREEN}PASSED${NC}" + elif [[ $IOS_FAIL -eq 1 ]]; then + echo -e " iOS: ${RED}FAILED${NC}" + else + echo -e " iOS: ${YELLOW}SKIPPED${NC}" + fi +fi +echo "================================================" + +# Exit with failure if any platform failed +if [[ $ANDROID_FAIL -eq 1 ]] || [[ $IOS_FAIL -eq 1 ]]; then + exit 1 +fi diff --git a/runners-conversion/periodVault/runner.sh b/runners-conversion/periodVault/runner.sh new file mode 100755 index 0000000..96fc71b --- /dev/null +++ b/runners-conversion/periodVault/runner.sh @@ -0,0 +1,730 @@ +#!/usr/bin/env bash +# runner.sh — Setup, manage, and tear down a GitHub Actions self-hosted runner. +# +# Supports two platforms: +# - macOS: Installs the runner agent natively, manages it as a launchd service. +# - Linux: Delegates to Docker-based runner infrastructure in infra/runners/. +# +# Typical flow: +# 1) ./scripts/runner.sh --mode setup # install/configure runner +# 2) ./scripts/runner.sh --mode status # verify runner is online +# 3) (push/PR triggers CI on the self-hosted runner) +# 4) ./scripts/runner.sh --mode stop # stop runner +# 5) ./scripts/runner.sh --mode uninstall # deregister and clean up + +set -euo pipefail + +MODE="" +RUNNER_DIR="${PERIODVAULT_RUNNER_DIR:-${HOME}/.periodvault-runner}" +RUNNER_LABELS="self-hosted,macOS,periodvault" +RUNNER_NAME="" +REPO_SLUG="" +REG_TOKEN="" +FORCE=false +FOREGROUND=false +PUSH_REGISTRY="" +BUILD_TARGET="" + +PLIST_LABEL="com.periodvault.actions-runner" +PLIST_PATH="${HOME}/Library/LaunchAgents/${PLIST_LABEL}.plist" + +# Resolved during Linux operations +INFRA_DIR="" + +usage() { + cat <<'EOF' +Usage: + ./scripts/runner.sh --mode [options] + +Required: + --mode MODE One of: setup, start, stop, status, build-image, uninstall + +Options (macOS): + --runner-dir DIR Installation directory (default: ~/.periodvault-runner) + --labels LABELS Comma-separated labels (default: self-hosted,macOS,periodvault) + --name NAME Runner name 
(default: periodvault-) + --repo OWNER/REPO GitHub repository (default: auto-detected from git remote) + --token TOKEN Registration/removal token (prompted if not provided) + --force Force re-setup even if already configured + --foreground Start in foreground instead of launchd service + +Options (Linux — Docker mode): + On Linux, this script delegates to Docker Compose in infra/runners/. + Configuration is managed via .env and envs/*.env files. + See infra/runners/README.md for details. + +Options (build-image): + --target TARGET Dockerfile target: slim or full (default: builds both) + --push REGISTRY Tag and push to a registry (e.g. localhost:5000) + +Common: + -h, --help Show this help + +Examples (macOS): + ./scripts/runner.sh --mode setup + ./scripts/runner.sh --mode setup --token ghp_xxxxx + ./scripts/runner.sh --mode start + ./scripts/runner.sh --mode start --foreground + ./scripts/runner.sh --mode status + ./scripts/runner.sh --mode stop + ./scripts/runner.sh --mode uninstall + +Examples (Linux): + ./scripts/runner.sh --mode setup # prompts for .env, starts runners + ./scripts/runner.sh --mode start # docker compose up -d + ./scripts/runner.sh --mode stop # docker compose down + ./scripts/runner.sh --mode status # docker compose ps + logs + ./scripts/runner.sh --mode uninstall # docker compose down -v --rmi local + +Examples (build-image — works on any OS): + ./scripts/runner.sh --mode build-image # build slim + full + ./scripts/runner.sh --mode build-image --target slim # build slim only + ./scripts/runner.sh --mode build-image --push localhost:5000 # build + push to local registry + +Environment overrides: + PERIODVAULT_RUNNER_DIR Runner installation directory (macOS only) +EOF +} + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +log() { + printf '[runner] %s\n' "$*" +} + +warn() { + printf '[runner] WARNING: %s\n' "$*" >&2 +} + 
+die() { + printf '[runner] ERROR: %s\n' "$*" >&2 + exit 1 +} + +require_cmd() { + local cmd="$1" + command -v "$cmd" >/dev/null 2>&1 || die "required command not found: $cmd" +} + +# --------------------------------------------------------------------------- +# Platform detection +# --------------------------------------------------------------------------- + +detect_os() { + case "$(uname -s)" in + Darwin) printf 'darwin' ;; + Linux) printf 'linux' ;; + *) die "Unsupported OS: $(uname -s). This script supports macOS and Linux." ;; + esac +} + +ensure_macos() { + [[ "$(detect_os)" == "darwin" ]] || die "This operation requires macOS." +} + +find_infra_dir() { + local script_dir + script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + local repo_root="${script_dir}/.." + INFRA_DIR="$(cd "${repo_root}/infra/runners" 2>/dev/null && pwd)" || true + + if [[ -z "$INFRA_DIR" ]] || [[ ! -f "${INFRA_DIR}/docker-compose.yml" ]]; then + die "Could not find infra/runners/docker-compose.yml. Ensure you are running from the periodvault repo." 
+ fi +} + +# --------------------------------------------------------------------------- +# Argument parsing +# --------------------------------------------------------------------------- + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --mode) + shift; [[ $# -gt 0 ]] || die "--mode requires a value" + MODE="$1"; shift ;; + --runner-dir) + shift; [[ $# -gt 0 ]] || die "--runner-dir requires a value" + RUNNER_DIR="$1"; shift ;; + --labels) + shift; [[ $# -gt 0 ]] || die "--labels requires a value" + RUNNER_LABELS="$1"; shift ;; + --name) + shift; [[ $# -gt 0 ]] || die "--name requires a value" + RUNNER_NAME="$1"; shift ;; + --repo) + shift; [[ $# -gt 0 ]] || die "--repo requires a value" + REPO_SLUG="$1"; shift ;; + --token) + shift; [[ $# -gt 0 ]] || die "--token requires a value" + REG_TOKEN="$1"; shift ;; + --target) + shift; [[ $# -gt 0 ]] || die "--target requires a value (slim or full)" + BUILD_TARGET="$1"; shift ;; + --force) + FORCE=true; shift ;; + --foreground) + FOREGROUND=true; shift ;; + --push) + shift; [[ $# -gt 0 ]] || die "--push requires a registry address (e.g. localhost:5000)" + PUSH_REGISTRY="$1"; shift ;; + -h|--help) + usage; exit 0 ;; + *) + die "unknown argument: $1" ;; + esac + done + + [[ -n "$MODE" ]] || die "--mode is required (setup|start|stop|status|build-image|uninstall)" + case "$MODE" in + setup|start|stop|status|build-image|uninstall) ;; + *) die "invalid --mode: $MODE (expected setup|start|stop|status|build-image|uninstall)" ;; + esac +} + +# --------------------------------------------------------------------------- +# Repo detection +# --------------------------------------------------------------------------- + +detect_repo() { + if [[ -n "$REPO_SLUG" ]]; then + return + fi + + local remote_url="" + remote_url="$(git remote get-url origin 2>/dev/null || true)" + if [[ -z "$remote_url" ]]; then + die "Could not detect repository from git remote. Use --repo OWNER/REPO." 
+ fi + + REPO_SLUG="$(printf '%s' "$remote_url" \ + | sed -E 's#^(https?://github\.com/|git@github\.com:)##' \ + | sed -E 's/\.git$//')" + + if [[ -z "$REPO_SLUG" ]] || ! printf '%s' "$REPO_SLUG" | grep -qE '^[^/]+/[^/]+$'; then + die "Could not parse OWNER/REPO from remote URL: $remote_url. Use --repo OWNER/REPO." + fi + + log "Auto-detected repository: $REPO_SLUG" +} + +# =========================================================================== +# macOS: Native runner agent + launchd service +# =========================================================================== + +detect_arch() { + local arch + arch="$(uname -m)" + case "$arch" in + arm64|aarch64) printf 'arm64' ;; + x86_64) printf 'x64' ;; + *) die "Unsupported architecture: $arch" ;; + esac +} + +download_runner() { + require_cmd curl + require_cmd shasum + require_cmd tar + + local arch + arch="$(detect_arch)" + + log "Fetching latest runner release metadata..." + local release_json + release_json="$(curl -fsSL "https://api.github.com/repos/actions/runner/releases/latest")" + + local version + version="$(printf '%s' "$release_json" | grep '"tag_name"' | sed -E 's/.*"v([^"]+)".*/\1/')" + if [[ -z "$version" ]]; then + die "Could not determine latest runner version from GitHub API." 
+  fi
+  log "Latest runner version: $version"
+
+  local tarball="actions-runner-osx-${arch}-${version}.tar.gz"
+  local download_url="https://github.com/actions/runner/releases/download/v${version}/${tarball}"
+
+  local sha_marker="osx-${arch}"  # release notes wrap per-asset SHAs as: <!-- BEGIN SHA <marker> -->HEX<!-- END SHA <marker> -->
+  local expected_sha=""
+  expected_sha="$(printf '%s' "$release_json" \
+    | python3 -c "
+import json,sys,re
+body = json.load(sys.stdin).get('body','')
+m = re.search(r'BEGIN SHA ${sha_marker} -->\s*([0-9a-f]{64})', body)  # anchor to THIS asset's marker, not the first hash in the notes
+print(m.group(1) if m else '')
+" 2>/dev/null || true)"
+
+  mkdir -p "$RUNNER_DIR"
+  local dest="${RUNNER_DIR}/${tarball}"
+
+  if [[ -f "$dest" ]]; then
+    log "Tarball already exists: $dest"
+  else
+    log "Downloading: $download_url"
+    curl -fSL -o "$dest" "$download_url"
+  fi
+
+  if [[ -n "$expected_sha" ]]; then
+    log "Verifying SHA256 checksum..."
+    local actual_sha
+    actual_sha="$(shasum -a 256 "$dest" | awk '{print $1}')"
+    if [[ "$actual_sha" != "$expected_sha" ]]; then
+      rm -f "$dest"
+      die "Checksum mismatch. Expected: $expected_sha, Got: $actual_sha"
+    fi
+    log "Checksum verified."
+  else
+    warn "Could not extract expected SHA256 from release metadata; skipping verification."
+  fi
+
+  log "Extracting runner into $RUNNER_DIR..."
+  tar -xzf "$dest" -C "$RUNNER_DIR"
+  rm -f "$dest"
+
+  log "Runner extracted (version $version)."
+}
+
+prompt_token() {
+  if [[ -n "$REG_TOKEN" ]]; then
+    return
+  fi
+
+  log ""
+  log "A registration token is required."
+  log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners/new"
+  log "Or via the API:"
+  log "  curl -X POST -H 'Authorization: token YOUR_PAT' \\"
+  log "    https://api.github.com/repos/${REPO_SLUG}/actions/runners/registration-token"
+  log ""
+  printf '[runner] Enter registration token: '
+  read -r REG_TOKEN
+  [[ -n "$REG_TOKEN" ]] || die "No token provided."
+}
+
+register_runner() {
+  if [[ -z "$RUNNER_NAME" ]]; then
+    RUNNER_NAME="periodvault-$(hostname -s)"
+  fi
+
+  log "Registering runner '${RUNNER_NAME}' with labels '${RUNNER_LABELS}'..."
+
+  local config_args=(
+    --url "https://github.com/${REPO_SLUG}"
+    --token "$REG_TOKEN"
+    --name "$RUNNER_NAME"
+    --labels "$RUNNER_LABELS"
+    --work "${RUNNER_DIR}/_work"
+    --unattended
+  )
+
+  if [[ "$FORCE" == "true" ]]; then
+    config_args+=(--replace)
+  fi
+
+  "${RUNNER_DIR}/config.sh" "${config_args[@]}"
+  log "Runner registered."
+}
+
+# ---------------------------------------------------------------------------
+# launchd service management (macOS)
+# ---------------------------------------------------------------------------
+
+create_plist() {
+  mkdir -p "${RUNNER_DIR}/logs"
+  mkdir -p "$(dirname "$PLIST_PATH")"
+
+  cat > "$PLIST_PATH" <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+  <key>Label</key>
+  <string>${PLIST_LABEL}</string>
+  <key>ProgramArguments</key>
+  <array>
+    <string>${RUNNER_DIR}/run.sh</string>
+  </array>
+  <key>WorkingDirectory</key>
+  <string>${RUNNER_DIR}</string>
+  <key>RunAtLoad</key>
+  <true/>
+  <key>KeepAlive</key>
+  <true/>
+  <key>StandardOutPath</key>
+  <string>${RUNNER_DIR}/logs/stdout.log</string>
+  <key>StandardErrorPath</key>
+  <string>${RUNNER_DIR}/logs/stderr.log</string>
+  <key>EnvironmentVariables</key>
+  <dict>
+    <key>PATH</key>
+    <string>/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
+    <key>HOME</key>
+    <string>${HOME}</string>
+  </dict>
+</dict>
+</plist>
+EOF
+
+  log "Launchd plist created: $PLIST_PATH"
+}
+
+load_service() {
+  if launchctl list 2>/dev/null | grep -q "$PLIST_LABEL"; then
+    log "Service already loaded; unloading first..."
+    launchctl unload "$PLIST_PATH" 2>/dev/null || true
+  fi
+
+  launchctl load "$PLIST_PATH"
+  log "Service loaded."
+}
+
+unload_service() {
+  if launchctl list 2>/dev/null | grep -q "$PLIST_LABEL"; then
+    launchctl unload "$PLIST_PATH" 2>/dev/null || true
+    log "Service unloaded."
+  else
+    log "Service is not loaded."
+  fi
+}
+
+service_is_running() {
+  launchctl list 2>/dev/null | grep -q "$PLIST_LABEL"
+}
+
+# ---------------------------------------------------------------------------
+# macOS mode implementations
+# ---------------------------------------------------------------------------
+
+do_setup_darwin() {
+  detect_repo
+
+  if [[ -f "${RUNNER_DIR}/.runner" ]] && [[ "$FORCE" != "true" ]]; then
+    log "Runner already configured at $RUNNER_DIR."
+    log "Use --force to re-setup."
+ do_status_darwin + return + fi + + download_runner + prompt_token + register_runner + create_plist + load_service + + log "" + log "Setup complete. Runner is registered and running." + log "" + log "To activate self-hosted CI, set these repository variables:" + log ' CI_RUNS_ON_MACOS: ["self-hosted", "macOS", "periodvault"]' + log "" + log "Via CLI:" + log ' gh variable set CI_RUNS_ON_MACOS --body '"'"'["self-hosted","macOS","periodvault"]'"'" + log "" + log "Energy saver: ensure your Mac does not sleep while the runner is active." + log " System Settings > Energy Saver > Prevent automatic sleeping" +} + +do_start_darwin() { + [[ -f "${RUNNER_DIR}/.runner" ]] || die "Runner not configured. Run --mode setup first." + + if [[ "$FOREGROUND" == "true" ]]; then + log "Starting runner in foreground (Ctrl-C to stop)..." + exec "${RUNNER_DIR}/run.sh" + fi + + if service_is_running; then + log "Runner service is already running." + return + fi + + if [[ ! -f "$PLIST_PATH" ]]; then + log "Plist not found; recreating..." + create_plist + fi + + load_service + log "Runner started." +} + +do_stop_darwin() { + unload_service + log "Runner stopped." +} + +do_status_darwin() { + log "Runner directory: $RUNNER_DIR" + + if [[ ! -f "${RUNNER_DIR}/.runner" ]]; then + log "Status: NOT CONFIGURED" + log "Run --mode setup to install and register the runner." 
+ return + fi + + local runner_name="" + if command -v python3 >/dev/null 2>&1; then + runner_name="$(python3 -c "import json,sys; d=json.load(open(sys.argv[1])); print(d.get('agentName',''))" "${RUNNER_DIR}/.runner" 2>/dev/null || true)" + fi + if [[ -z "$runner_name" ]]; then + runner_name="(could not parse)" + fi + + log "Runner name: $runner_name" + + if service_is_running; then + log "Service: RUNNING" + else + log "Service: STOPPED" + fi + + if pgrep -f "Runner.Listener" >/dev/null 2>&1; then + log "Process: ACTIVE (Runner.Listener found)" + else + log "Process: INACTIVE" + fi + + local log_file="${RUNNER_DIR}/logs/stdout.log" + if [[ -f "$log_file" ]]; then + log "" + log "Recent log output (last 10 lines):" + tail -n 10 "$log_file" 2>/dev/null || true + fi + + local diag_dir="${RUNNER_DIR}/_diag" + if [[ -d "$diag_dir" ]]; then + local latest_diag + latest_diag="$(ls -t "${diag_dir}"/Runner_*.log 2>/dev/null | head -n1 || true)" + if [[ -n "$latest_diag" ]]; then + log "" + log "Latest runner diagnostic (last 5 lines):" + tail -n 5 "$latest_diag" 2>/dev/null || true + fi + fi +} + +do_uninstall_darwin() { + log "Uninstalling self-hosted runner..." + + unload_service + + if [[ -f "$PLIST_PATH" ]]; then + rm -f "$PLIST_PATH" + log "Removed plist: $PLIST_PATH" + fi + + if [[ -f "${RUNNER_DIR}/config.sh" ]]; then + if [[ -z "$REG_TOKEN" ]]; then + detect_repo + log "" + log "A removal token is required to deregister the runner." 
+ log "Obtain one from: https://github.com/${REPO_SLUG}/settings/actions/runners" + log "Or via the API:" + log " curl -X POST -H 'Authorization: token YOUR_PAT' \\" + log " https://api.github.com/repos/${REPO_SLUG}/actions/runners/remove-token" + log "" + printf '[runner] Enter removal token (or press Enter to skip deregistration): ' + read -r REG_TOKEN + fi + + if [[ -n "$REG_TOKEN" ]]; then + "${RUNNER_DIR}/config.sh" remove --token "$REG_TOKEN" || warn "Deregistration failed; you may need to remove the runner manually from GitHub settings." + log "Runner deregistered from GitHub." + else + warn "Skipping deregistration. Remove the runner manually from GitHub settings." + fi + fi + + if [[ -d "$RUNNER_DIR" ]]; then + log "Removing runner directory: $RUNNER_DIR" + rm -rf "$RUNNER_DIR" + log "Runner directory removed." + fi + + log "Uninstall complete." +} + +# =========================================================================== +# Linux: Docker-based runner via infra/runners/ +# =========================================================================== + +ensure_docker() { + require_cmd docker + + if docker compose version >/dev/null 2>&1; then + return + fi + + if command -v docker-compose >/dev/null 2>&1; then + warn "Found docker-compose (standalone). docker compose v2 plugin is recommended." + return + fi + + die "docker compose is required. 
Install Docker Compose v2: https://docs.docker.com/compose/install/" +} + +compose() { + docker compose -f "${INFRA_DIR}/docker-compose.yml" "$@" +} + +do_build_image() { + find_infra_dir + ensure_docker + + local targets=() + if [[ -n "$BUILD_TARGET" ]]; then + targets+=("$BUILD_TARGET") + else + targets+=("slim" "full") + fi + + for target in "${targets[@]}"; do + local image_tag="periodvault-runner:${target}" + if [[ -n "$PUSH_REGISTRY" ]]; then + image_tag="${PUSH_REGISTRY}/periodvault-runner:${target}" + fi + + log "Building runner image: ${image_tag} (target: ${target}, platform: linux/amd64)" + DOCKER_BUILDKIT=1 docker build --platform linux/amd64 --pull \ + --target "$target" \ + -t "$image_tag" \ + "$INFRA_DIR" + + if [[ -n "$PUSH_REGISTRY" ]]; then + log "Pushing ${image_tag}..." + docker push "$image_tag" + log "Image pushed: ${image_tag}" + else + log "Image built locally: ${image_tag}" + fi + done + + if [[ -z "$PUSH_REGISTRY" ]]; then + log "" + log "Use --push to push to a registry." + log "Example: ./scripts/runner.sh --mode build-image --push localhost:5000" + fi +} + +do_setup_linux() { + find_infra_dir + ensure_docker + + log "Docker-based runner setup (infra/runners/)" + log "" + + if [[ ! -f "${INFRA_DIR}/.env" ]]; then + if [[ -f "${INFRA_DIR}/.env.example" ]]; then + cp "${INFRA_DIR}/.env.example" "${INFRA_DIR}/.env" + log "Created ${INFRA_DIR}/.env from template." + log "Edit this file to set your GITHUB_PAT." + log "" + printf '[runner] Enter your GitHub PAT (or press Enter to edit .env manually later): ' + read -r pat_input + if [[ -n "$pat_input" ]]; then + sed -i "s/^GITHUB_PAT=.*/GITHUB_PAT=${pat_input}/" "${INFRA_DIR}/.env" + log "GITHUB_PAT set in .env" + fi + else + die "Missing .env.example template in ${INFRA_DIR}" + fi + else + log ".env already exists; skipping." + fi + + if [[ ! 
-f "${INFRA_DIR}/envs/periodvault.env" ]]; then + if [[ -f "${INFRA_DIR}/envs/periodvault.env.example" ]]; then + cp "${INFRA_DIR}/envs/periodvault.env.example" "${INFRA_DIR}/envs/periodvault.env" + log "Created ${INFRA_DIR}/envs/periodvault.env from template." + log "Edit this file to configure REPO_URL, RUNNER_NAME, and resource limits." + else + die "Missing envs/periodvault.env.example template in ${INFRA_DIR}" + fi + else + log "envs/periodvault.env already exists; skipping." + fi + + log "" + log "Starting runners..." + compose up -d + + log "" + log "Setup complete. Verify with: ./scripts/runner.sh --mode status" + log "" + log "To activate self-hosted CI, set these repository variables:" + log ' gh variable set CI_RUNS_ON --body '"'"'["self-hosted","Linux","X64"]'"'" + log ' gh variable set CI_RUNS_ON_ANDROID --body '"'"'["self-hosted","Linux","X64","android-emulator"]'"'" +} + +do_start_linux() { + find_infra_dir + ensure_docker + + log "Starting Docker runners..." + compose up -d + log "Runners started." +} + +do_stop_linux() { + find_infra_dir + ensure_docker + + log "Stopping Docker runners..." + compose down + log "Runners stopped." +} + +do_status_linux() { + find_infra_dir + ensure_docker + + log "Docker runner status (infra/runners/):" + log "" + compose ps + log "" + log "Recent logs (last 20 lines):" + compose logs --tail 20 2>/dev/null || true +} + +do_uninstall_linux() { + find_infra_dir + ensure_docker + + log "Uninstalling Docker runners..." + compose down -v --rmi local 2>/dev/null || compose down -v + log "Docker runners removed (containers, volumes, local images)." + log "" + log "Note: Runners should auto-deregister from GitHub (ephemeral mode)." 
+ log "If stale runners remain, remove them manually:" + log " gh api -X DELETE repos/OWNER/REPO/actions/runners/RUNNER_ID" +} + +# =========================================================================== +# Entry point +# =========================================================================== + +main() { + parse_args "$@" + + local os + os="$(detect_os)" + + case "$MODE" in + setup) + if [[ "$os" == "darwin" ]]; then do_setup_darwin; else do_setup_linux; fi ;; + start) + if [[ "$os" == "darwin" ]]; then do_start_darwin; else do_start_linux; fi ;; + stop) + if [[ "$os" == "darwin" ]]; then do_stop_darwin; else do_stop_linux; fi ;; + status) + if [[ "$os" == "darwin" ]]; then do_status_darwin; else do_status_linux; fi ;; + build-image) + do_build_image ;; + uninstall) + if [[ "$os" == "darwin" ]]; then do_uninstall_darwin; else do_uninstall_linux; fi ;; + *) + die "unexpected mode: $MODE" ;; + esac +} + +main "$@" diff --git a/runners-conversion/periodVault/setup-dev-environment.sh b/runners-conversion/periodVault/setup-dev-environment.sh new file mode 100755 index 0000000..f2bf6ac --- /dev/null +++ b/runners-conversion/periodVault/setup-dev-environment.sh @@ -0,0 +1,280 @@ +#!/usr/bin/env bash +# setup-dev-environment.sh +# Idempotent bootstrap for local Period Vault development. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +INSTALL_MISSING=0 +RUN_CHECKS=0 + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +usage() { + cat <<'EOF' +Usage: ./scripts/setup-dev-environment.sh [--install] [--verify] [--help] + +Options: + --install Attempt safe auto-install for supported tools (Homebrew on macOS). + --verify Run post-setup verification commands. + --help Show this help. + +Notes: + - Script is idempotent and safe to re-run. + - Without --install, the script reports actionable install commands. 
+ - It never writes credentials/tokens and does not run privileged commands automatically. +EOF +} + +log() { printf "${BLUE}[%s]${NC} %s\n" "setup" "$*"; } +ok() { printf "${GREEN}[ok]${NC} %s\n" "$*"; } +warn() { printf "${YELLOW}[warn]${NC} %s\n" "$*"; } +fail() { printf "${RED}[error]${NC} %s\n" "$*" >&2; } + +for arg in "$@"; do + case "$arg" in + --install) INSTALL_MISSING=1 ;; + --verify) RUN_CHECKS=1 ;; + --help|-h) usage; exit 0 ;; + *) + fail "Unknown option: $arg" + usage + exit 1 + ;; + esac +done + +if [[ ! -x "$PROJECT_ROOT/gradlew" ]]; then + fail "Missing executable Gradle wrapper at ./gradlew." + exit 1 +fi + +OS="$(uname -s)" +IS_MAC=0 +IS_LINUX=0 +case "$OS" in + Darwin) IS_MAC=1 ;; + Linux) IS_LINUX=1 ;; + *) + warn "Unsupported OS: $OS. Script will run checks but skip auto-install." + ;; +esac + +declare -a REQUIRED_TOOLS +declare -a OPTIONAL_TOOLS +declare -a MISSING_REQUIRED +declare -a MISSING_OPTIONAL +declare -a REMEDIATION_HINTS + +REQUIRED_TOOLS=(git java) +OPTIONAL_TOOLS=(gh act adb emulator avdmanager sdkmanager) + +if [[ $IS_MAC -eq 1 ]]; then + REQUIRED_TOOLS+=(xcodebuild xcrun) +fi + +have_cmd() { + command -v "$1" >/dev/null 2>&1 +} + +append_unique_hint() { + local hint="$1" + local existing + for existing in "${REMEDIATION_HINTS[@]:-}"; do + if [[ "$existing" == "$hint" ]]; then + return 0 + fi + done + REMEDIATION_HINTS+=("$hint") +} + +detect_java_major() { + local raw version major + raw="$(java -version 2>&1 | head -n 1 || true)" + version="$(echo "$raw" | sed -E 's/.*"([0-9]+)(\.[0-9]+.*)?".*/\1/' || true)" + if [[ -z "$version" ]]; then + echo "0" + return 0 + fi + major="$version" + echo "$major" +} + +install_with_brew() { + local formula="$1" + if ! have_cmd brew; then + append_unique_hint "Install Homebrew first: https://brew.sh/" + return 1 + fi + + if brew list --formula "$formula" >/dev/null 2>&1; then + ok "brew formula '$formula' already installed" + return 0 + fi + + log "Installing '$formula' via Homebrew..." 
+ if brew install "$formula"; then + ok "Installed '$formula'" + return 0 + fi + return 1 +} + +try_install_tool() { + local tool="$1" + if [[ $INSTALL_MISSING -ne 1 ]]; then + return 1 + fi + + if [[ $IS_MAC -eq 1 ]]; then + case "$tool" in + git) install_with_brew git ;; + gh) install_with_brew gh ;; + act) install_with_brew act ;; + java) + install_with_brew openjdk@17 + append_unique_hint "If needed, configure JAVA_HOME for JDK 17+: export JAVA_HOME=\$(/usr/libexec/java_home -v 17)" + ;; + *) + return 1 + ;; + esac + return $? + fi + + if [[ $IS_LINUX -eq 1 ]]; then + append_unique_hint "Install '$tool' using your distro package manager and re-run this script." + fi + return 1 +} + +tool_hint() { + local tool="$1" + if [[ $IS_MAC -eq 1 ]]; then + case "$tool" in + git|gh|act) echo "brew install $tool" ;; + java) echo "brew install openjdk@17 && export JAVA_HOME=\$(/usr/libexec/java_home -v 17)" ;; + xcodebuild|xcrun) echo "Install Xcode from the App Store and run: sudo xcodebuild -runFirstLaunch" ;; + adb|emulator|avdmanager|sdkmanager) echo "Install Android Studio + Android SDK command-line tools, then add platform-tools/emulator/cmdline-tools/latest/bin to PATH." ;; + *) echo "Install '$tool' and ensure it is on PATH." ;; + esac + return 0 + fi + + if [[ $IS_LINUX -eq 1 ]]; then + case "$tool" in + git) echo "sudo apt-get install -y git" ;; + java) echo "sudo apt-get install -y openjdk-17-jdk" ;; + gh) echo "Install GitHub CLI from https://cli.github.com/" ;; + act) echo "Install act from https://github.com/nektos/act" ;; + *) echo "Install '$tool' using your package manager and add it to PATH." ;; + esac + return 0 + fi + + echo "Install '$tool' and ensure it is on PATH." +} + +log "Checking local development prerequisites..." 
+ +for tool in "${REQUIRED_TOOLS[@]}"; do + if have_cmd "$tool"; then + ok "Found required tool: $tool" + else + warn "Missing required tool: $tool" + if try_install_tool "$tool" && have_cmd "$tool"; then + ok "Auto-installed required tool: $tool" + else + MISSING_REQUIRED+=("$tool") + append_unique_hint "$(tool_hint "$tool")" + fi + fi +done + +for tool in "${OPTIONAL_TOOLS[@]}"; do + if have_cmd "$tool"; then + ok "Found optional tool: $tool" + else + warn "Missing optional tool: $tool" + if try_install_tool "$tool" && have_cmd "$tool"; then + ok "Auto-installed optional tool: $tool" + else + MISSING_OPTIONAL+=("$tool") + append_unique_hint "$(tool_hint "$tool")" + fi + fi +done + +if have_cmd java; then + JAVA_MAJOR="$(detect_java_major)" + if [[ "$JAVA_MAJOR" =~ ^[0-9]+$ ]] && [[ "$JAVA_MAJOR" -ge 17 ]]; then + ok "Java version is compatible (major=$JAVA_MAJOR)" + else + fail "Java 17+ is required (detected major=$JAVA_MAJOR)." + append_unique_hint "$(tool_hint "java")" + if [[ ! " ${MISSING_REQUIRED[*]} " =~ " java " ]]; then + MISSING_REQUIRED+=("java") + fi + fi +fi + +log "Installing git hooks (idempotent)..." +"$SCRIPT_DIR/install-hooks.sh" +ok "Git hooks configured" + +echo "" +echo "================================================" +echo "Setup Summary" +echo "================================================" +if [[ ${#MISSING_REQUIRED[@]} -eq 0 ]]; then + ok "All required prerequisites are available." +else + fail "Missing required prerequisites: ${MISSING_REQUIRED[*]}" +fi + +if [[ ${#MISSING_OPTIONAL[@]} -eq 0 ]]; then + ok "All optional developer tools are available." 
+else + warn "Missing optional tools: ${MISSING_OPTIONAL[*]}" +fi + +if [[ ${#REMEDIATION_HINTS[@]} -gt 0 ]]; then + echo "" + echo "Suggested remediation:" + for hint in "${REMEDIATION_HINTS[@]}"; do + echo " - $hint" + done +fi + +echo "" +echo "Verification commands:" +echo " - ./gradlew shared:jvmTest" +echo " - ./scripts/run-emulator-tests.sh android" +echo " - ./scripts/run-emulator-tests.sh ios" +echo " - ./scripts/verify.sh" + +if [[ $RUN_CHECKS -eq 1 ]]; then + echo "" + log "Running lightweight verification commands..." + "$PROJECT_ROOT/gradlew" --version >/dev/null + ok "Gradle wrapper check passed" + if have_cmd gh; then + gh --version >/dev/null + ok "GitHub CLI check passed" + fi + if have_cmd xcrun; then + xcrun simctl list devices available >/dev/null + ok "iOS simulator listing check passed" + fi +fi + +if [[ ${#MISSING_REQUIRED[@]} -gt 0 ]]; then + exit 1 +fi + +ok "Developer environment bootstrap completed." diff --git a/runners-conversion/periodVault/setup.sh b/runners-conversion/periodVault/setup.sh new file mode 100755 index 0000000..2a9bc59 --- /dev/null +++ b/runners-conversion/periodVault/setup.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# setup.sh — Cross-platform developer environment setup entrypoint. +# +# macOS: Dispatches to scripts/setup-dev-environment.sh (full bootstrap). +# Linux: Minimal bootstrap (JDK check, git hooks, Gradle dependencies). +# +# Usage: ./scripts/setup.sh [--install] [--verify] + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +OS="$(uname -s)" + +if [[ "$OS" == "Darwin" ]]; then + exec "${SCRIPT_DIR}/setup-dev-environment.sh" "$@" +fi + +if [[ "$OS" != "Linux" ]]; then + echo "Unsupported OS: $OS. This script supports macOS and Linux." 
+ exit 1 +fi + +# --- Linux bootstrap --- + +echo "=== periodvault development setup (Linux) ===" +echo "" + +# Check JDK +if command -v java >/dev/null 2>&1; then + JAVA_MAJOR="$(java -version 2>&1 | head -1 | sed -E 's/.*"([0-9]+).*/\1/')" + echo "[ok] Java is installed (major version: $JAVA_MAJOR)" + if [[ "$JAVA_MAJOR" -lt 17 ]]; then + echo "[warn] JDK 17+ is required. Found major version $JAVA_MAJOR." + echo " Install: sudo apt-get install -y openjdk-17-jdk-headless" + fi +else + echo "[error] Java not found." + echo " Install: sudo apt-get install -y openjdk-17-jdk-headless" + exit 1 +fi + +# Check Android SDK +if [[ -n "${ANDROID_HOME:-}" ]]; then + echo "[ok] ANDROID_HOME is set: $ANDROID_HOME" +else + echo "[warn] ANDROID_HOME not set. Android SDK may not be available." + echo " Set ANDROID_HOME to your Android SDK path for Android builds." +fi + +# Install git hooks +if [[ -x "$SCRIPT_DIR/install-hooks.sh" ]]; then + echo "" + echo "Installing git hooks..." + "$SCRIPT_DIR/install-hooks.sh" + echo "[ok] Git hooks configured" +fi + +# Download Gradle dependencies +if [[ -x "$PROJECT_ROOT/gradlew" ]]; then + echo "" + echo "Downloading Gradle dependencies..." + "$PROJECT_ROOT/gradlew" --no-daemon dependencies > /dev/null 2>&1 || true + echo "[ok] Gradle dependencies downloaded" +fi + +echo "" +echo "=== Setup complete (Linux) ===" +echo "" +echo "Verification commands:" +echo " ./gradlew shared:jvmTest" +echo " ./gradlew androidApp:testDebugUnitTest" diff --git a/runners-conversion/periodVault/test-audit-enforcement.sh b/runners-conversion/periodVault/test-audit-enforcement.sh new file mode 100755 index 0000000..a7edb98 --- /dev/null +++ b/runners-conversion/periodVault/test-audit-enforcement.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# test-audit-enforcement.sh +# Smoke checks for process/audit enforcement scripts. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +cd "$PROJECT_ROOT" + +"$SCRIPT_DIR/check-process.sh" HEAD~1 +"$SCRIPT_DIR/validate-sdd.sh" HEAD~1 +FORCE_AUDIT_GATES=1 "$SCRIPT_DIR/validate-tdd.sh" HEAD~1 + +TMP_REPORT="$(mktemp)" +cat >"$TMP_REPORT" <<'MD' +# CODEX Report +## Requirements Mapping +- sample +## Constitution Compliance Matrix +| Principle | Status | Notes | +|-----------|--------|-------| +| I | pass | sample | +## Evidence +- sample +## Risks +- sample +MD +"$SCRIPT_DIR/validate-audit-report.sh" "$TMP_REPORT" +rm -f "$TMP_REPORT" + +echo "[test-audit-enforcement] PASS" diff --git a/runners-conversion/periodVault/test-infra-runners.sh b/runners-conversion/periodVault/test-infra-runners.sh new file mode 100755 index 0000000..596dd58 --- /dev/null +++ b/runners-conversion/periodVault/test-infra-runners.sh @@ -0,0 +1,476 @@ +#!/usr/bin/env bash +# test-infra-runners.sh — Integration tests for self-hosted CI runner infrastructure. +# +# Tests cover: +# 1. Shell script syntax (bash -n) for all infrastructure scripts +# 2. runner.sh argument parsing and help output +# 3. setup.sh cross-platform dispatch logic +# 4. Docker image builds (slim + full) with content verification +# 5. Docker Compose configuration validation +# 6. ci.yml runner variable expression syntax +# 7. lib.sh headless emulator function structure +# 8. entrypoint.sh env validation logic +# +# Usage: ./scripts/test-infra-runners.sh [--skip-docker] +# +# --skip-docker Skip Docker image build tests (useful in CI without Docker) + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +PASS_COUNT=0 +FAIL_COUNT=0 +SKIP_COUNT=0 +SKIP_DOCKER=false + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +log() { echo "[test-infra] $*"; } +pass() { PASS_COUNT=$((PASS_COUNT + 1)); log "PASS: $*"; } +fail() { FAIL_COUNT=$((FAIL_COUNT + 1)); log "FAIL: $*"; } +skip() { SKIP_COUNT=$((SKIP_COUNT + 1)); log "SKIP: $*"; } + +assert_file_exists() { + local path="$1" label="$2" + if [[ -f "$path" ]]; then + pass "$label" + else + fail "$label — file not found: $path" + fi +} + +assert_file_executable() { + local path="$1" label="$2" + if [[ -x "$path" ]]; then + pass "$label" + else + fail "$label — not executable: $path" + fi +} + +assert_contains() { + local haystack="$1" needle="$2" label="$3" + if echo "$haystack" | grep -qF -- "$needle"; then + pass "$label" + else + fail "$label — expected to contain: $needle" + fi +} + +assert_not_contains() { + local haystack="$1" needle="$2" label="$3" + if ! echo "$haystack" | grep -qF -- "$needle"; then + pass "$label" + else + fail "$label — should NOT contain: $needle" + fi +} + +assert_exit_code() { + local expected="$1" label="$2" + shift 2 + local actual + set +e + "$@" >/dev/null 2>&1 + actual=$? 
+ set -e + if [[ "$actual" -eq "$expected" ]]; then + pass "$label" + else + fail "$label — expected exit $expected, got $actual" + fi +} + +# --------------------------------------------------------------------------- +# Parse args +# --------------------------------------------------------------------------- + +while [[ $# -gt 0 ]]; do + case "$1" in + --skip-docker) SKIP_DOCKER=true; shift ;; + *) echo "Unknown arg: $1"; exit 1 ;; + esac +done + +# =========================================================================== +# Section 1: File existence and permissions +# =========================================================================== + +log "" +log "=== Section 1: File existence and permissions ===" + +assert_file_exists "$PROJECT_ROOT/infra/runners/Dockerfile" "Dockerfile exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/docker-compose.yml" "docker-compose.yml exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/entrypoint.sh" "entrypoint.sh exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/.env.example" "env.example exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/envs/periodvault.env.example" "periodvault.env.example exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/.gitignore" ".gitignore exists" +assert_file_exists "$PROJECT_ROOT/infra/runners/README.md" "runners README exists" +assert_file_exists "$PROJECT_ROOT/scripts/runner.sh" "runner.sh exists" +assert_file_exists "$PROJECT_ROOT/scripts/setup.sh" "setup.sh exists" +assert_file_exists "$PROJECT_ROOT/.github/workflows/build-runner-image.yml" "build-runner-image workflow exists" + +assert_file_executable "$PROJECT_ROOT/infra/runners/entrypoint.sh" "entrypoint.sh is executable" +assert_file_executable "$PROJECT_ROOT/scripts/runner.sh" "runner.sh is executable" +assert_file_executable "$PROJECT_ROOT/scripts/setup.sh" "setup.sh is executable" + +# =========================================================================== +# Section 2: Shell script syntax 
validation (bash -n) +# =========================================================================== + +log "" +log "=== Section 2: Shell script syntax ===" + +for script in \ + "$PROJECT_ROOT/scripts/runner.sh" \ + "$PROJECT_ROOT/scripts/setup.sh" \ + "$PROJECT_ROOT/infra/runners/entrypoint.sh"; do + name="$(basename "$script")" + if bash -n "$script" 2>/dev/null; then + pass "bash -n $name" + else + fail "bash -n $name — syntax error" + fi +done + +# =========================================================================== +# Section 3: runner.sh argument parsing +# =========================================================================== + +log "" +log "=== Section 3: runner.sh argument parsing ===" + +# --help should exit 0 and print usage +HELP_OUT="$("$PROJECT_ROOT/scripts/runner.sh" --help 2>&1)" || true +assert_contains "$HELP_OUT" "Usage:" "runner.sh --help shows usage" +assert_contains "$HELP_OUT" "--mode" "runner.sh --help mentions --mode" +assert_contains "$HELP_OUT" "build-image" "runner.sh --help mentions build-image" +assert_exit_code 0 "runner.sh --help exits 0" "$PROJECT_ROOT/scripts/runner.sh" --help + +# Missing --mode should fail +assert_exit_code 1 "runner.sh without --mode exits 1" "$PROJECT_ROOT/scripts/runner.sh" + +# Invalid mode should fail +assert_exit_code 1 "runner.sh --mode invalid exits 1" "$PROJECT_ROOT/scripts/runner.sh" --mode invalid + +# =========================================================================== +# Section 4: setup.sh platform dispatch +# =========================================================================== + +log "" +log "=== Section 4: setup.sh structure ===" + +SETUP_CONTENT="$(cat "$PROJECT_ROOT/scripts/setup.sh")" +assert_contains "$SETUP_CONTENT" "Darwin" "setup.sh handles macOS" +assert_contains "$SETUP_CONTENT" "Linux" "setup.sh handles Linux" +assert_contains "$SETUP_CONTENT" "setup-dev-environment.sh" "setup.sh dispatches to setup-dev-environment.sh" + +# 
=========================================================================== +# Section 5: entrypoint.sh validation logic +# =========================================================================== + +log "" +log "=== Section 5: entrypoint.sh structure ===" + +ENTRY_CONTENT="$(cat "$PROJECT_ROOT/infra/runners/entrypoint.sh")" +assert_contains "$ENTRY_CONTENT" "GITHUB_PAT" "entrypoint.sh validates GITHUB_PAT" +assert_contains "$ENTRY_CONTENT" "REPO_URL" "entrypoint.sh validates REPO_URL" +assert_contains "$ENTRY_CONTENT" "RUNNER_NAME" "entrypoint.sh validates RUNNER_NAME" +assert_contains "$ENTRY_CONTENT" "--ephemeral" "entrypoint.sh uses ephemeral mode" +assert_contains "$ENTRY_CONTENT" "trap cleanup" "entrypoint.sh traps for cleanup" +assert_contains "$ENTRY_CONTENT" "registration-token" "entrypoint.sh generates registration token" +assert_contains "$ENTRY_CONTENT" "remove-token" "entrypoint.sh handles removal token" + +# =========================================================================== +# Section 6: Dockerfile structure +# =========================================================================== + +log "" +log "=== Section 6: Dockerfile structure ===" + +DOCKERFILE="$(cat "$PROJECT_ROOT/infra/runners/Dockerfile")" +assert_contains "$DOCKERFILE" "FROM ubuntu:24.04 AS base" "Dockerfile has base stage" +assert_contains "$DOCKERFILE" "FROM base AS slim" "Dockerfile has slim stage" +assert_contains "$DOCKERFILE" "FROM slim AS full" "Dockerfile has full stage" +assert_contains "$DOCKERFILE" "openjdk-17-jdk-headless" "Dockerfile installs JDK 17" +assert_contains "$DOCKERFILE" "platforms;android-34" "Dockerfile installs Android SDK 34" +assert_contains "$DOCKERFILE" "build-tools;34.0.0" "Dockerfile installs build-tools 34" +assert_contains "$DOCKERFILE" "system-images;android-34;google_apis;x86_64" "Full stage includes system images" +assert_contains "$DOCKERFILE" "avdmanager create avd" "Full stage pre-creates AVD" +assert_contains "$DOCKERFILE" "kvm" 
"Full stage sets up KVM group" +assert_contains "$DOCKERFILE" "HEALTHCHECK" "Dockerfile has HEALTHCHECK" +assert_contains "$DOCKERFILE" "ENTRYPOINT" "Dockerfile has ENTRYPOINT" +assert_contains "$DOCKERFILE" 'userdel -r ubuntu' "Dockerfile removes ubuntu user (GID 1000 conflict fix)" + +# =========================================================================== +# Section 7: docker-compose.yml structure +# =========================================================================== + +log "" +log "=== Section 7: docker-compose.yml structure ===" + +COMPOSE="$(cat "$PROJECT_ROOT/infra/runners/docker-compose.yml")" +assert_contains "$COMPOSE" "registry:" "Compose has registry service" +assert_contains "$COMPOSE" "runner-slim-1:" "Compose has runner-slim-1" +assert_contains "$COMPOSE" "runner-slim-2:" "Compose has runner-slim-2" +assert_contains "$COMPOSE" "runner-emulator:" "Compose has runner-emulator" +assert_contains "$COMPOSE" "registry:2" "Registry uses official image" +assert_contains "$COMPOSE" "/dev/kvm" "Emulator gets KVM device" +assert_contains "$COMPOSE" "no-new-privileges" "Security: no-new-privileges" +assert_contains "$COMPOSE" "init: true" "Uses tini (init: true)" +assert_contains "$COMPOSE" "stop_grace_period" "Emulator has stop grace period" +assert_contains "$COMPOSE" "android-emulator" "Emulator runner has android-emulator label" + +# =========================================================================== +# Section 8: ci.yml runner variable expressions +# =========================================================================== + +log "" +log "=== Section 8: ci.yml runner variable expressions ===" + +CI_YML="$(cat "$PROJECT_ROOT/.github/workflows/ci.yml")" +assert_contains "$CI_YML" 'vars.CI_RUNS_ON_MACOS' "ci.yml uses CI_RUNS_ON_MACOS variable" +assert_contains "$CI_YML" 'vars.CI_RUNS_ON_ANDROID' "ci.yml uses CI_RUNS_ON_ANDROID variable" +assert_contains "$CI_YML" 'vars.CI_RUNS_ON ' "ci.yml uses CI_RUNS_ON variable" +assert_contains 
"$CI_YML" 'fromJSON(' "ci.yml uses fromJSON() for runner targeting" + +# Verify fallback values are present (safe default = current macOS runner) +assert_contains "$CI_YML" '"self-hosted","macOS","periodvault"' "ci.yml has macOS fallback" + +# Verify parallelism: test-ios-simulator should NOT depend on test-android-emulator +# Extract test-ios-simulator needs line +IOS_SECTION="$(awk '/test-ios-simulator:/,/runs-on:/' "$PROJECT_ROOT/.github/workflows/ci.yml")" +assert_not_contains "$IOS_SECTION" "test-android-emulator" "test-ios-simulator does NOT depend on test-android-emulator (parallel)" +assert_contains "$IOS_SECTION" "test-shared" "test-ios-simulator depends on test-shared" + +# Verify audit-quality-gate waits for both platform tests +AUDIT_SECTION="$(awk '/audit-quality-gate:/,/runs-on:/' "$PROJECT_ROOT/.github/workflows/ci.yml")" +assert_contains "$AUDIT_SECTION" "test-android-emulator" "audit-quality-gate waits for android emulator" +assert_contains "$AUDIT_SECTION" "test-ios-simulator" "audit-quality-gate waits for ios simulator" + +# =========================================================================== +# Section 9: lib.sh headless emulator support +# =========================================================================== + +log "" +log "=== Section 9: lib.sh headless emulator support ===" + +LIB_SH="$(cat "$PROJECT_ROOT/scripts/lib.sh")" +assert_contains "$LIB_SH" "start_emulator_headless()" "lib.sh defines start_emulator_headless()" +assert_contains "$LIB_SH" "-no-window" "Headless emulator uses -no-window" +assert_contains "$LIB_SH" "-no-audio" "Headless emulator uses -no-audio" +assert_contains "$LIB_SH" "swiftshader_indirect" "Headless emulator uses swiftshader GPU" + +# Verify OS-aware dispatch in ensure_android_emulator +assert_contains "$LIB_SH" '"$(uname -s)" == "Linux"' "ensure_android_emulator detects Linux" +assert_contains "$LIB_SH" 'start_emulator_headless' "ensure_android_emulator calls headless on Linux" +assert_contains 
"$LIB_SH" 'start_emulator_windowed' "ensure_android_emulator calls windowed on macOS" + +# Verify headless zombie kill is macOS-only +ZOMBIE_LINE="$(grep -n 'is_emulator_headless' "$PROJECT_ROOT/scripts/lib.sh" | grep 'Darwin' || true)" +if [[ -n "$ZOMBIE_LINE" ]]; then + pass "Headless zombie kill is guarded by Darwin check" +else + fail "Headless zombie kill should be macOS-only (Darwin guard)" +fi + +# =========================================================================== +# Section 10: .gitignore protects secrets +# =========================================================================== + +log "" +log "=== Section 10: .gitignore protects secrets ===" + +GITIGNORE="$(cat "$PROJECT_ROOT/infra/runners/.gitignore")" +assert_contains "$GITIGNORE" ".env" ".gitignore excludes .env" +assert_contains "$GITIGNORE" "!.env.example" ".gitignore keeps .example files" + +# =========================================================================== +# Section 11: Docker image builds (requires Docker) +# =========================================================================== + +log "" +log "=== Section 11: Docker image builds ===" + +if $SKIP_DOCKER; then + skip "Docker image build tests (--skip-docker)" +elif ! command -v docker &>/dev/null; then + skip "Docker image build tests (docker not found)" +elif ! docker info >/dev/null 2>&1; then + skip "Docker image build tests (docker daemon not running)" +else + DOCKER_PLATFORM="linux/amd64" + + # --- Build slim --- + log "Building slim image (this may take a few minutes)..." 
+ if docker build --platform "$DOCKER_PLATFORM" --target slim \ + -t periodvault-runner-test:slim "$PROJECT_ROOT/infra/runners/" >/dev/null 2>&1; then + pass "Docker build: slim target succeeds" + + # Verify slim image contents + SLIM_JAVA="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + java -version 2>&1 | head -1)" || true + if echo "$SLIM_JAVA" | grep -q "17"; then + pass "Slim image: Java 17 is installed" + else + fail "Slim image: Java 17 not found — got: $SLIM_JAVA" + fi + + SLIM_SDK="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + bash -c 'ls $ANDROID_HOME/platforms/' 2>&1)" || true + if echo "$SLIM_SDK" | grep -q "android-34"; then + pass "Slim image: Android SDK 34 is installed" + else + fail "Slim image: Android SDK 34 not found — got: $SLIM_SDK" + fi + + SLIM_RUNNER="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + bash -c 'ls /home/runner/actions-runner/run.sh' 2>&1)" || true + if echo "$SLIM_RUNNER" | grep -q "run.sh"; then + pass "Slim image: GitHub Actions runner agent is installed" + else + fail "Slim image: runner agent not found" + fi + + SLIM_USER="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + whoami 2>&1)" || true + if [[ "$SLIM_USER" == "runner" ]]; then + pass "Slim image: runs as 'runner' user" + else + fail "Slim image: expected user 'runner', got '$SLIM_USER'" + fi + + SLIM_ENTRY="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + bash -c 'test -x /home/runner/entrypoint.sh && echo ok' 2>&1)" || true + if [[ "$SLIM_ENTRY" == "ok" ]]; then + pass "Slim image: entrypoint.sh is present and executable" + else + fail "Slim image: entrypoint.sh not executable" + fi + + # Verify slim does NOT have emulator + SLIM_EMU="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:slim \ + bash -c 'command -v emulator || echo not-found' 2>&1)" || true + if echo "$SLIM_EMU" | 
grep -q "not-found"; then + pass "Slim image: does NOT include emulator (expected)" + else + fail "Slim image: unexpectedly contains emulator" + fi + else + fail "Docker build: slim target failed" + fi + + # --- Build full --- + log "Building full image (this may take several minutes)..." + if docker build --platform "$DOCKER_PLATFORM" --target full \ + -t periodvault-runner-test:full "$PROJECT_ROOT/infra/runners/" >/dev/null 2>&1; then + pass "Docker build: full target succeeds" + + # Verify full image has emulator + FULL_EMU="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:full \ + bash -c 'command -v emulator && echo found' 2>&1)" || true + if echo "$FULL_EMU" | grep -q "found"; then + pass "Full image: emulator is installed" + else + fail "Full image: emulator not found" + fi + + # Verify full image has AVD pre-created + FULL_AVD="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:full \ + bash -c '${ANDROID_HOME}/cmdline-tools/latest/bin/avdmanager list avd 2>/dev/null | grep "Name:" || echo none' 2>&1)" || true + if echo "$FULL_AVD" | grep -q "phone"; then + pass "Full image: AVD 'phone' is pre-created" + else + fail "Full image: AVD 'phone' not found — got: $FULL_AVD" + fi + + # Verify full image has system images + FULL_SYSIMG="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:full \ + bash -c 'ls $ANDROID_HOME/system-images/android-34/google_apis/x86_64/ 2>/dev/null | head -1 || echo none' 2>&1)" || true + if [[ "$FULL_SYSIMG" != "none" ]]; then + pass "Full image: system-images;android-34;google_apis;x86_64 installed" + else + fail "Full image: system images not found" + fi + + # Verify full image has xvfb + FULL_XVFB="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:full \ + bash -c 'command -v Xvfb && echo found || echo not-found' 2>&1)" || true + if echo "$FULL_XVFB" | grep -q "found"; then + pass "Full image: Xvfb is installed" + else + fail "Full image: Xvfb not 
found" + fi + + # Verify kvm group exists and runner is a member + FULL_KVM="$(docker run --rm --platform "$DOCKER_PLATFORM" periodvault-runner-test:full \ + bash -c 'id runner 2>/dev/null' 2>&1)" || true + if echo "$FULL_KVM" | grep -q "kvm"; then + pass "Full image: runner user is in kvm group" + else + fail "Full image: runner not in kvm group — got: $FULL_KVM" + fi + else + fail "Docker build: full target failed" + fi + + # --- Docker Compose validation --- + log "Validating docker-compose.yml..." + # Create temp env files for validation + cp "$PROJECT_ROOT/infra/runners/.env.example" "$PROJECT_ROOT/infra/runners/.env" + cp "$PROJECT_ROOT/infra/runners/envs/periodvault.env.example" "$PROJECT_ROOT/infra/runners/envs/periodvault.env" + + if docker compose -f "$PROJECT_ROOT/infra/runners/docker-compose.yml" config --quiet 2>/dev/null; then + pass "docker compose config validates" + else + fail "docker compose config failed" + fi + + # Verify compose defines expected services + COMPOSE_SERVICES="$(docker compose -f "$PROJECT_ROOT/infra/runners/docker-compose.yml" config --services 2>/dev/null)" + assert_contains "$COMPOSE_SERVICES" "registry" "Compose service: registry" + assert_contains "$COMPOSE_SERVICES" "runner-slim-1" "Compose service: runner-slim-1" + assert_contains "$COMPOSE_SERVICES" "runner-slim-2" "Compose service: runner-slim-2" + assert_contains "$COMPOSE_SERVICES" "runner-emulator" "Compose service: runner-emulator" + + # Clean up temp env files + rm -f "$PROJECT_ROOT/infra/runners/.env" "$PROJECT_ROOT/infra/runners/envs/periodvault.env" + + # --- Cleanup test images --- + docker rmi periodvault-runner-test:slim periodvault-runner-test:full 2>/dev/null || true +fi + +# =========================================================================== +# Section 12: build-runner-image.yml workflow structure +# =========================================================================== + +log "" +log "=== Section 12: build-runner-image.yml structure ===" + 
+BUILD_WF="$(cat "$PROJECT_ROOT/.github/workflows/build-runner-image.yml")" +assert_contains "$BUILD_WF" "slim" "Build workflow includes slim target" +assert_contains "$BUILD_WF" "full" "Build workflow includes full target" +assert_contains "$BUILD_WF" "matrix" "Build workflow uses matrix strategy" +assert_contains "$BUILD_WF" "ghcr.io" "Build workflow pushes to GHCR" + +# =========================================================================== +# Results +# =========================================================================== + +log "" +log "==============================" +TOTAL=$((PASS_COUNT + FAIL_COUNT + SKIP_COUNT)) +log "Results: $PASS_COUNT passed, $FAIL_COUNT failed, $SKIP_COUNT skipped (total: $TOTAL)" +log "==============================" + +if [[ $FAIL_COUNT -gt 0 ]]; then + log "FAILED" + exit 1 +fi + +log "ALL PASSED" +exit 0 diff --git a/runners-conversion/periodVault/test-test-quality-gate.sh b/runners-conversion/periodVault/test-test-quality-gate.sh new file mode 100755 index 0000000..0b0bf0a --- /dev/null +++ b/runners-conversion/periodVault/test-test-quality-gate.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# test-test-quality-gate.sh — Integration-style tests for validate-test-quality.sh. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +PASS_COUNT=0 +FAIL_COUNT=0 +declare -a TMP_REPOS=() + +log() { + echo "[test-quality-test] $*" +} + +pass() { + PASS_COUNT=$((PASS_COUNT + 1)) + log "PASS: $*" +} + +fail() { + FAIL_COUNT=$((FAIL_COUNT + 1)) + log "FAIL: $*" +} + +require_cmd() { + if ! 
command -v "$1" >/dev/null 2>&1; then + echo "Missing required command: $1" + exit 1 + fi +} + +run_expect_success() { + local label="$1" + shift + if "$@" >/tmp/test-quality-gate.out 2>&1; then + pass "$label" + else + fail "$label" + cat /tmp/test-quality-gate.out + fi +} + +run_expect_failure() { + local label="$1" + shift + if "$@" >/tmp/test-quality-gate.out 2>&1; then + fail "$label (expected failure but command succeeded)" + cat /tmp/test-quality-gate.out + else + pass "$label" + fi +} + +create_fixture_repo() { + local repo + repo="$(mktemp -d)" + + mkdir -p "$repo/scripts" \ + "$repo/audit" \ + "$repo/androidApp/src/androidTest/kotlin/example" \ + "$repo/iosApp/iosAppUITests" + + cp "$PROJECT_ROOT/scripts/validate-test-quality.sh" "$repo/scripts/" + chmod +x "$repo/scripts/validate-test-quality.sh" + + cat > "$repo/androidApp/src/androidTest/kotlin/example/ExampleUiTest.kt" <<'EOF' +package example + +import org.junit.Test + +class ExampleUiTest { + @Test + fun usesAntiPatternsForFixture() { + Thread.sleep(5) + try { + // fixture-only + } catch (e: AssertionError) { + // fixture-only + } + } +} +EOF + + cat > "$repo/iosApp/iosAppUITests/ExampleUiTests.swift" <<'EOF' +import XCTest + +final class ExampleUiTests: XCTestCase { + func testFixtureUsesAntiPatterns() { + sleep(1) + if XCUIApplication().buttons["Example"].exists { + XCTAssertTrue(true) + } + } +} +EOF + + cat > "$repo/audit/test-quality-baseline.json" <<'EOF' +{ + "version": 1, + "generated_at": "2026-02-20T16:00:00Z", + "metrics": [ + { + "id": "android_thread_sleep_calls", + "description": "Android Thread.sleep", + "mode": "rg", + "root": "androidApp/src/androidTest", + "glob": "*.kt", + "pattern": "Thread\\.sleep\\(", + "baseline": 1, + "allowed_growth": 0 + }, + { + "id": "android_assertionerror_catches", + "description": "Android AssertionError catches", + "mode": "rg", + "root": "androidApp/src/androidTest", + "glob": "*.kt", + "pattern": "catch \\([^\\)]*AssertionError", + "baseline": 1, + 
"allowed_growth": 0 + }, + { + "id": "ios_sleep_calls", + "description": "iOS sleep calls", + "mode": "rg", + "root": "iosApp/iosAppUITests", + "glob": "*.swift", + "pattern": "\\bsleep\\(", + "baseline": 1, + "allowed_growth": 0 + }, + { + "id": "ios_conditional_exists_guards_in_test_bodies", + "description": "iOS conditional exists checks in test bodies", + "mode": "swift_test_body_pattern", + "root": "iosApp/iosAppUITests", + "glob": "*.swift", + "pattern": "if[[:space:]]+[^\\n]*\\.exists", + "baseline": 1, + "allowed_growth": 0 + }, + { + "id": "ios_noop_assert_true", + "description": "iOS no-op assertTrue(true) in test bodies", + "mode": "swift_test_body_pattern", + "root": "iosApp/iosAppUITests", + "glob": "*.swift", + "pattern": "XCTAssertTrue\\(true\\)", + "baseline": 1, + "allowed_growth": 0 + }, + { + "id": "ios_empty_test_bodies", + "description": "iOS empty or comment-only test bodies", + "mode": "rg_multiline", + "root": "iosApp/iosAppUITests", + "glob": "*.swift", + "pattern": "(?s)func\\s+test[[:alnum:]_]+\\s*\\([^)]*\\)\\s*(?:throws\\s*)?\\{\\s*(?:(?://[^\\n]*\\n)\\s*)*\\}", + "baseline": 0, + "allowed_growth": 0 + }, + { + "id": "ios_placeholder_test_markers", + "description": "iOS placeholder markers in test bodies", + "mode": "swift_test_body_pattern", + "root": "iosApp/iosAppUITests", + "glob": "*.swift", + "pattern": "(TODO|FIXME|placeholder|no-op)", + "baseline": 0, + "allowed_growth": 0 + } + ] +} +EOF + + TMP_REPOS+=("$repo") + echo "$repo" +} + +test_baseline_pass() { + local repo + repo="$(create_fixture_repo)" + run_expect_success "validate-test-quality passes when metrics match baseline" \ + bash -lc "cd '$repo' && scripts/validate-test-quality.sh" +} + +test_growth_fails() { + local repo + repo="$(create_fixture_repo)" + echo "Thread.sleep(10)" >> "$repo/androidApp/src/androidTest/kotlin/example/ExampleUiTest.kt" + run_expect_failure "validate-test-quality fails when metric grows past threshold" \ + bash -lc "cd '$repo' && 
scripts/validate-test-quality.sh" +} + +test_allowed_growth_passes() { + local repo + repo="$(create_fixture_repo)" + + local tmp + tmp="$(mktemp)" + jq '(.metrics[] | select(.id == "ios_sleep_calls") | .allowed_growth) = 1' \ + "$repo/audit/test-quality-baseline.json" > "$tmp" + mv "$tmp" "$repo/audit/test-quality-baseline.json" + + echo "sleep(1)" >> "$repo/iosApp/iosAppUITests/ExampleUiTests.swift" + + run_expect_success "validate-test-quality honors allowed_growth threshold" \ + bash -lc "cd '$repo' && scripts/validate-test-quality.sh" +} + +main() { + require_cmd jq + require_cmd rg + require_cmd awk + + test_baseline_pass + test_growth_fails + test_allowed_growth_passes + + log "Summary: pass=$PASS_COUNT fail=$FAIL_COUNT" + if [[ "$FAIL_COUNT" -gt 0 ]]; then + exit 1 + fi + return 0 +} + +cleanup() { + local repo + for repo in "${TMP_REPOS[@]:-}"; do + [[ -d "$repo" ]] && rm -rf "$repo" + done + rm -f /tmp/test-quality-gate.out + return 0 +} + +trap cleanup EXIT + +main "$@" diff --git a/runners-conversion/periodVault/validate-audit-report.sh b/runners-conversion/periodVault/validate-audit-report.sh new file mode 100755 index 0000000..2c6dbec --- /dev/null +++ b/runners-conversion/periodVault/validate-audit-report.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +# validate-audit-report.sh +# Structural + semantic validation for CODEX audit reports. +set -euo pipefail + +REPORT_PATH="${1:-CODEX-REPORT.md}" + +if [[ ! -f "$REPORT_PATH" ]]; then + echo "[validate-audit-report] Missing report: $REPORT_PATH" + exit 1 +fi + +FAILURES=0 + +# --- Check 1: Required sections exist --- +required_sections=( + "## Requirements Mapping" + "## Constitution Compliance Matrix" + "## Evidence" + "## Risks" +) + +for section in "${required_sections[@]}"; do + if command -v rg >/dev/null 2>&1; then + if ! rg -q "^${section//\//\\/}$" "$REPORT_PATH"; then + echo "[validate-audit-report] Missing section: $section" + FAILURES=$((FAILURES + 1)) + fi + else + if ! 
grep -Eq "^${section//\//\\/}$" "$REPORT_PATH"; then
+      echo "[validate-audit-report] Missing section: $section"
+      FAILURES=$((FAILURES + 1))
+    fi
+  fi
+done
+
+# --- Check 2: Reject forbidden placeholders ---
+forbidden_patterns=("TODO" "TBD" "UNMAPPED" "PLACEHOLDER" "FIXME")
+for pattern in "${forbidden_patterns[@]}"; do
+  # NOTE: grep -c prints "0" AND exits non-zero when there are no matches, so the
+  # previous `|| echo 0` fallback produced "0<newline>0", which broke the numeric
+  # [[ -gt ]] comparison below and tripped set -e. Capture the count with the
+  # failure suppressed, then default an empty result (rg prints nothing on no
+  # match) to 0 explicitly.
+  if command -v rg >/dev/null 2>&1; then
+    count="$(rg -c "$pattern" "$REPORT_PATH" 2>/dev/null)" || true
+  else
+    count="$(grep -c "$pattern" "$REPORT_PATH" 2>/dev/null)" || true
+  fi
+  count="${count:-0}"
+  if [[ "$count" -gt 0 ]]; then
+    echo "[validate-audit-report] Forbidden placeholder '$pattern' found ($count occurrences)"
+    FAILURES=$((FAILURES + 1))
+  fi
+done
+
+# --- Check 3: Non-empty sections (at least 1 non-blank line after heading) ---
+for section in "${required_sections[@]}"; do
+  # Extract content between this heading and the next ## heading (or EOF)
+  section_escaped="${section//\//\\/}"
+  content=""
+  if command -v awk >/dev/null 2>&1; then
+    content="$(awk "/^${section_escaped}\$/{found=1; next} found && /^## /{exit} found{print}" "$REPORT_PATH" | grep -v '^[[:space:]]*$' || true)"
+  fi
+  if [[ -z "$content" ]]; then
+    echo "[validate-audit-report] Section is empty: $section"
+    FAILURES=$((FAILURES + 1))
+  fi
+done
+
+# --- Check 4: Requirements mapping has entries (table rows or list items) --- 
+req_entries="$(awk '/^## Requirements Mapping$/{found=1; next} found && /^## /{exit} found && /^\|[^-]/{print} found && /^- /{print}' "$REPORT_PATH" | wc -l | tr -d ' ')"
+if [[ "$req_entries" -lt 1 ]]; then
+  echo "[validate-audit-report] Requirements Mapping has no entries (expected table rows or list items)"
+  FAILURES=$((FAILURES + 1))
+fi
+
+# --- Result ---
+if [[ $FAILURES -gt 0 ]]; then
+  echo "[validate-audit-report] FAILED ($FAILURES issues)"
+  exit 1
+fi
+
+echo "[validate-audit-report] PASS ($REPORT_PATH)"
diff --git a/runners-conversion/periodVault/validate-ios-skipped-tests.sh b/runners-conversion/periodVault/validate-ios-skipped-tests.sh
new file mode 100755 index 0000000..31e434b --- /dev/null +++ b/runners-conversion/periodVault/validate-ios-skipped-tests.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# validate-ios-skipped-tests.sh — Fail when iOS test results contain non-allowlisted skipped tests. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +usage() { + cat <<'EOF' +Usage: + scripts/validate-ios-skipped-tests.sh [allowlist_file] + +Arguments: + xcresult_path Path to .xcresult bundle generated by xcodebuild test + allowlist_file Optional allowlist of skipped test names (one per line, # comments allowed) + Default: audit/ios-skipped-tests-allowlist.txt +EOF +} + +if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then + usage + exit 0 +fi + +RESULT_PATH="${1:-}" +if [[ -z "$RESULT_PATH" ]]; then + usage + exit 1 +fi + +ALLOWLIST_PATH="${2:-$PROJECT_ROOT/audit/ios-skipped-tests-allowlist.txt}" + +require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "Missing required command: $1" >&2 + exit 1 + fi +} + +require_cmd xcrun +require_cmd jq +require_cmd sort +require_cmd comm +require_cmd mktemp + +if [[ ! -d "$RESULT_PATH" ]]; then + echo "xcresult bundle not found: $RESULT_PATH" >&2 + exit 1 +fi + +TMP_JSON="$(mktemp)" +TMP_SKIPPED="$(mktemp)" +TMP_ALLOWLIST="$(mktemp)" +TMP_UNALLOWED="$(mktemp)" + +cleanup() { + rm -f "$TMP_JSON" "$TMP_SKIPPED" "$TMP_ALLOWLIST" "$TMP_UNALLOWED" +} +trap cleanup EXIT + +if ! xcrun xcresulttool get test-results tests --path "$RESULT_PATH" --format json > "$TMP_JSON" 2>/dev/null; then + echo "Failed to parse xcresult test results: $RESULT_PATH" >&2 + exit 1 +fi + +jq -r ' + .. 
| objects + | select((.result == "Skipped") or (.status == "Skipped") or (.outcome == "Skipped") or (.testStatus == "Skipped")) + | (.name // .identifier // empty) +' "$TMP_JSON" | sed '/^[[:space:]]*$/d' | sort -u > "$TMP_SKIPPED" + +if [[ -f "$ALLOWLIST_PATH" ]]; then + { + grep -vE '^[[:space:]]*(#|$)' "$ALLOWLIST_PATH" || true + } | sed 's/[[:space:]]*$//' | sed '/^[[:space:]]*$/d' | sort -u > "$TMP_ALLOWLIST" +else + : > "$TMP_ALLOWLIST" +fi + +if [[ ! -s "$TMP_SKIPPED" ]]; then + echo "Skipped-test gate: PASS (no skipped iOS tests)" + exit 0 +fi + +comm -23 "$TMP_SKIPPED" "$TMP_ALLOWLIST" > "$TMP_UNALLOWED" + +if [[ -s "$TMP_UNALLOWED" ]]; then + echo "Skipped-test gate: FAIL (non-allowlisted skipped iOS tests found)" + cat "$TMP_UNALLOWED" | sed 's/^/ - /' + if [[ -s "$TMP_ALLOWLIST" ]]; then + echo "Allowlist used: $ALLOWLIST_PATH" + else + echo "Allowlist is empty: $ALLOWLIST_PATH" + fi + exit 1 +fi + +echo "Skipped-test gate: PASS (all skipped iOS tests are allowlisted)" +exit 0 diff --git a/runners-conversion/periodVault/validate-sdd.sh b/runners-conversion/periodVault/validate-sdd.sh new file mode 100755 index 0000000..eaf3a22 --- /dev/null +++ b/runners-conversion/periodVault/validate-sdd.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# validate-sdd.sh +# Ensures changed spec folders keep mandatory SDD artifacts. +set -euo pipefail + +BASE_REF="${1:-origin/main}" + +if ! git rev-parse --verify "$BASE_REF" >/dev/null 2>&1; then + BASE_REF="HEAD~1" +fi + +CHANGED_SPEC_FILES=() +while IFS= read -r line; do + [[ -n "$line" ]] && CHANGED_SPEC_FILES+=("$line") +done < <(git diff --name-only "$BASE_REF"...HEAD -- 'specs/**') + +if [[ ${#CHANGED_SPEC_FILES[@]} -eq 0 ]]; then + echo "[validate-sdd] No changes under specs/." 
+  exit 0
+fi
+
+SPEC_DIRS=()
+# Append candidate to SPEC_DIRS only if it is not already present
+# (bash-3.x-safe dedupe; no associative arrays required).
+add_unique_dir() {
+  local candidate="$1"
+  local existing
+  for existing in "${SPEC_DIRS[@]:-}"; do
+    [[ "$existing" == "$candidate" ]] && return 0
+  done
+  SPEC_DIRS+=("$candidate")
+}
+
+for path in "${CHANGED_SPEC_FILES[@]}"; do
+  # Capture "specs/<feature>" straight from the regex we already evaluate,
+  # instead of forking `echo | cut` once per changed file.
+  if [[ "$path" =~ ^(specs/[^/]+)/ ]]; then
+    add_unique_dir "${BASH_REMATCH[1]}"
+  fi
+done
+
+if [[ ${#SPEC_DIRS[@]} -eq 0 ]]; then
+  echo "[validate-sdd] PASS (no feature spec directories changed)"
+  exit 0
+fi
+
+FAILED=0
+for dir in "${SPEC_DIRS[@]}"; do
+  for required in spec.md plan.md tasks.md allowed-files.txt; do
+    if [[ ! -f "$dir/$required" ]]; then
+      echo "[validate-sdd] Missing required file: $dir/$required"
+      FAILED=1
+    fi
+  done
+done
+
+if [[ $FAILED -ne 0 ]]; then
+  exit 1
+fi
+
+echo "[validate-sdd] PASS ($BASE_REF...HEAD)"
diff --git a/runners-conversion/periodVault/validate-tdd.sh b/runners-conversion/periodVault/validate-tdd.sh
new file mode 100755
index 0000000..ad721b6
--- /dev/null
+++ b/runners-conversion/periodVault/validate-tdd.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+# validate-tdd.sh
+# Guard that production code changes are accompanied by tests.
+set -euo pipefail
+
+BASE_REF="${1:-origin/main}"
+
+if ! git rev-parse --verify "$BASE_REF" >/dev/null 2>&1; then
+  BASE_REF="HEAD~1"
+fi
+
+CHANGED_FILES=()
+while IFS= read -r line; do
+  [[ -n "$line" ]] && CHANGED_FILES+=("$line")
+done < <(git diff --name-only "$BASE_REF"...HEAD)
+
+if [[ ${#CHANGED_FILES[@]} -eq 0 ]]; then
+  echo "[validate-tdd] No changed files."
+  exit 0
+fi
+
+# True (0) when the path is app/production source rather than test code.
+is_production_file() {
+  local f="$1"
+  [[ "$f" == shared/src/commonMain/* ]] && return 0
+  [[ "$f" == androidApp/src/main/* ]] && return 0
+  # iOS production code lives under iosApp/iosApp/. The test bundles are in
+  # *sibling* directories (iosApp/iosAppTests/, iosApp/iosAppUITests/), which
+  # can never match this glob, so the previous extra != guards were dead code.
+  [[ "$f" == iosApp/iosApp/* ]] && return 0
+  return 1
+}
+
+# True (0) when the path is in a recognized test source location.
+is_test_file() {
+  local f="$1"
+  [[ "$f" == shared/src/commonTest/* ]] && return 0
+  [[ "$f" == shared/src/jvmTest/* ]] && return 0
+  [[ "$f" == androidApp/src/androidTest/* ]] && return 0
+  [[ "$f" == androidApp/src/test/* ]] && return 0
+  [[ "$f" == iosApp/iosAppUITests/* ]] && return 0
+  [[ "$f" == iosApp/iosAppTests/* ]] && return 0
+  return 1
+}
+
+PROD_COUNT=0
+TEST_COUNT=0
+for file in "${CHANGED_FILES[@]}"; do
+  if is_production_file "$file"; then
+    PROD_COUNT=$((PROD_COUNT + 1))
+  fi
+  if is_test_file "$file"; then
+    TEST_COUNT=$((TEST_COUNT + 1))
+  fi
+done
+
+if [[ "$PROD_COUNT" -gt 0 && "$TEST_COUNT" -eq 0 ]]; then
+  echo "[validate-tdd] Failing: production code changed without matching test updates."
+ echo "[validate-tdd] Production files changed: $PROD_COUNT" + exit 1 +fi + +CHANGED_TEST_FILES=() +TEST_PATH_REGEX='^(shared/src/(commonTest|jvmTest)/|androidApp/src/(androidTest|test)/|iosApp/iosApp(UI)?Tests/)' +while IFS= read -r line; do + [[ -n "$line" ]] && CHANGED_TEST_FILES+=("$line") +done < <( + if command -v rg >/dev/null 2>&1; then + printf '%s\n' "${CHANGED_FILES[@]}" | rg "$TEST_PATH_REGEX" || true + else + printf '%s\n' "${CHANGED_FILES[@]}" | grep -E "$TEST_PATH_REGEX" || true + fi +) +for test_file in "${CHANGED_TEST_FILES[@]:-}"; do + if [[ -f "$test_file" ]]; then + if command -v rg >/dev/null 2>&1; then + if rg -q 'catch[[:space:]]*\([[:space:]]*AssertionError|XCTExpectFailure|@Ignore|@Disabled' "$test_file"; then + echo "[validate-tdd] Failing: potential weak assertion/skip anti-pattern in $test_file" + exit 1 + fi + else + if grep -Eq 'catch[[:space:]]*\([[:space:]]*AssertionError|XCTExpectFailure|@Ignore|@Disabled' "$test_file"; then + echo "[validate-tdd] Failing: potential weak assertion/skip anti-pattern in $test_file" + exit 1 + fi + fi + fi +done + +if [[ "${FORCE_AUDIT_GATES:-0}" == "1" ]]; then + echo "[validate-tdd] FORCE_AUDIT_GATES enabled." +fi + +echo "[validate-tdd] PASS ($BASE_REF...HEAD)" diff --git a/runners-conversion/periodVault/validate-test-quality.sh b/runners-conversion/periodVault/validate-test-quality.sh new file mode 100755 index 0000000..4e0675b --- /dev/null +++ b/runners-conversion/periodVault/validate-test-quality.sh @@ -0,0 +1,358 @@ +#!/usr/bin/env bash +# validate-test-quality.sh — Enforce anti-pattern regression thresholds for UI tests. +# +# Usage: +# scripts/validate-test-quality.sh [baseline_file] +# scripts/validate-test-quality.sh --help +# +# Behavior: +# - Loads metric baselines from JSON. +# - Counts pattern matches in configured roots via ripgrep. +# - Fails if any metric exceeds baseline + allowed_growth. 
+set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +DEFAULT_BASELINE_FILE="$PROJECT_ROOT/audit/test-quality-baseline.json" +BASELINE_FILE="${1:-$DEFAULT_BASELINE_FILE}" + +usage() { + cat <<'EOF' +validate-test-quality.sh: enforce UI test anti-pattern regression thresholds. + +Usage: + scripts/validate-test-quality.sh [baseline_file] + scripts/validate-test-quality.sh --help +EOF +} + +if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then + usage + exit 0 +fi + +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' + +FAILURES=0 + +fail() { + echo -e "${RED}FAIL:${NC} $1" + FAILURES=$((FAILURES + 1)) +} + +pass() { + echo -e "${GREEN}PASS:${NC} $1" +} + +require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + fail "Missing required command: $1" + fi +} + +relative_path() { + local path="$1" + if [[ "$path" == "$PROJECT_ROOT/"* ]]; then + echo "${path#$PROJECT_ROOT/}" + else + echo "$path" + fi +} + +count_matches() { + local pattern="$1" + local root="$2" + local glob="$3" + local output="" + local status=0 + + set +e + output="$(rg --count-matches --no-messages --pcre2 -N -g "$glob" "$pattern" "$root" 2>/dev/null)" + status=$? + set -e + + if [[ "$status" -eq 1 ]]; then + echo "0" + return 0 + fi + + if [[ "$status" -ne 0 ]]; then + return "$status" + fi + + if [[ -z "$output" ]]; then + echo "0" + return 0 + fi + + echo "$output" | awk -F: '{sum += $NF} END {print sum + 0}' +} + +count_matches_multiline() { + local pattern="$1" + local root="$2" + local glob="$3" + local output="" + local status=0 + + set +e + output="$(rg --count-matches --no-messages --pcre2 -U -N -g "$glob" "$pattern" "$root" 2>/dev/null)" + status=$? 
+ set -e + + if [[ "$status" -eq 1 ]]; then + echo "0" + return 0 + fi + + if [[ "$status" -ne 0 ]]; then + return "$status" + fi + + if [[ -z "$output" ]]; then + echo "0" + return 0 + fi + + echo "$output" | awk -F: '{sum += $NF} END {print sum + 0}' +} + +list_metric_files() { + local root="$1" + local glob="$2" + rg --files "$root" -g "$glob" 2>/dev/null || true +} + +count_swift_test_body_pattern_matches() { + local pattern="$1" + local root="$2" + local glob="$3" + local files=() + + while IFS= read -r file_path; do + [[ -n "$file_path" ]] && files+=("$file_path") + done < <(list_metric_files "$root" "$glob") + if [[ "${#files[@]}" -eq 0 ]]; then + echo "0" + return 0 + fi + + awk -v pattern="$pattern" ' + function update_depth(line, i, c) { + for (i = 1; i <= length(line); i++) { + c = substr(line, i, 1) + if (c == "{") depth++ + else if (c == "}") depth-- + } + } + + /^[[:space:]]*func[[:space:]]+test[[:alnum:]_]+[[:space:]]*\(.*\)[[:space:]]*(throws)?[[:space:]]*\{/ { + in_test = 1 + depth = 0 + update_depth($0) + if ($0 ~ pattern) count++ + if (depth <= 0) { + in_test = 0 + depth = 0 + } + next + } + + { + if (!in_test) next + + if ($0 ~ pattern) count++ + update_depth($0) + if (depth <= 0) { + in_test = 0 + depth = 0 + } + } + + END { print count + 0 } + ' "${files[@]}" +} + +count_swift_empty_test_bodies() { + local root="$1" + local glob="$2" + local files=() + + while IFS= read -r file_path; do + [[ -n "$file_path" ]] && files+=("$file_path") + done < <(list_metric_files "$root" "$glob") + if [[ "${#files[@]}" -eq 0 ]]; then + echo "0" + return 0 + fi + + awk ' + function update_depth(line, i, c) { + for (i = 1; i <= length(line); i++) { + c = substr(line, i, 1) + if (c == "{") depth++ + else if (c == "}") depth-- + } + } + + function test_body_has_code(body, cleaned) { + cleaned = body + gsub(/\/\/.*/, "", cleaned) + gsub(/[ \t\r\n{}]/, "", cleaned) + return cleaned != "" + } + + 
/^[[:space:]]*func[[:space:]]+test[[:alnum:]_]+[[:space:]]*\(.*\)[[:space:]]*(throws)?[[:space:]]*\{/ { + in_test = 1 + depth = 0 + body = "" + update_depth($0) + if (depth <= 0) { + empty_count++ + in_test = 0 + depth = 0 + body = "" + } + next + } + + { + if (!in_test) next + + body = body $0 "\n" + update_depth($0) + + if (depth <= 0) { + if (!test_body_has_code(body)) { + empty_count++ + } + in_test = 0 + depth = 0 + body = "" + } + } + + END { print empty_count + 0 } + ' "${files[@]}" +} + +count_metric() { + local mode="$1" + local pattern="$2" + local root="$3" + local glob="$4" + + case "$mode" in + rg) + count_matches "$pattern" "$root" "$glob" + ;; + rg_multiline) + count_matches_multiline "$pattern" "$root" "$glob" + ;; + swift_test_body_pattern) + count_swift_test_body_pattern_matches "$pattern" "$root" "$glob" + ;; + swift_empty_test_bodies) + count_swift_empty_test_bodies "$root" "$glob" + ;; + *) + echo "__INVALID_MODE__:$mode" + return 0 + ;; + esac +} + +require_cmd jq +require_cmd rg +require_cmd awk + +echo "=== Test Quality Gate ===" +echo "Baseline: $(relative_path "$BASELINE_FILE")" + +if [[ ! 
-f "$BASELINE_FILE" ]]; then + fail "Baseline file not found: $(relative_path "$BASELINE_FILE")" + echo "" + echo -e "${RED}Test quality gate failed with $FAILURES issue(s).${NC}" + exit 1 +fi + +if jq -e '.metrics | type == "array" and length > 0' "$BASELINE_FILE" >/dev/null; then + pass "Baseline includes metric definitions" +else + fail "Baseline file has no metrics" +fi + +while IFS= read -r metric; do + [[ -z "$metric" ]] && continue + + metric_id="$(jq -r '.id // empty' <<<"$metric")" + description="$(jq -r '.description // empty' <<<"$metric")" + mode="$(jq -r '.mode // "rg"' <<<"$metric")" + root_rel="$(jq -r '.root // empty' <<<"$metric")" + glob="$(jq -r '.glob // empty' <<<"$metric")" + pattern="$(jq -r '.pattern // ""' <<<"$metric")" + baseline="$(jq -r '.baseline // empty' <<<"$metric")" + allowed_growth="$(jq -r '.allowed_growth // empty' <<<"$metric")" + + if [[ -z "$metric_id" || -z "$root_rel" || -z "$glob" || -z "$baseline" || -z "$allowed_growth" ]]; then + fail "Metric entry is missing required fields: $metric" + continue + fi + + if [[ "$mode" != "swift_empty_test_bodies" && -z "$pattern" ]]; then + fail "Metric '$metric_id' requires non-empty pattern for mode '$mode'" + continue + fi + + if ! [[ "$baseline" =~ ^[0-9]+$ ]]; then + fail "Metric '$metric_id' has non-numeric baseline: $baseline" + continue + fi + + if ! [[ "$allowed_growth" =~ ^[0-9]+$ ]]; then + fail "Metric '$metric_id' has non-numeric allowed_growth: $allowed_growth" + continue + fi + + if [[ "$root_rel" == /* ]]; then + root_path="$root_rel" + else + root_path="$PROJECT_ROOT/$root_rel" + fi + + if [[ ! -d "$root_path" ]]; then + fail "Metric '$metric_id' root directory not found: $(relative_path "$root_path")" + continue + fi + + current_count="0" + if ! 
current_count="$(count_metric "$mode" "$pattern" "$root_path" "$glob")"; then + fail "Metric '$metric_id' failed while counting matches" + continue + fi + + if [[ "$current_count" == __INVALID_MODE__:* ]]; then + fail "Metric '$metric_id' uses unsupported mode '$mode'" + continue + fi + + max_allowed=$((baseline + allowed_growth)) + delta=$((current_count - baseline)) + + if [[ "$current_count" -le "$max_allowed" ]]; then + pass "$metric_id ($description): current=$current_count baseline=$baseline allowed_growth=$allowed_growth threshold=$max_allowed delta=$delta" + else + fail "$metric_id ($description): current=$current_count exceeds threshold=$max_allowed (baseline=$baseline allowed_growth=$allowed_growth delta=$delta)" + fi +done < <(jq -c '.metrics[]' "$BASELINE_FILE") + +echo "" +if [[ "$FAILURES" -eq 0 ]]; then + echo -e "${GREEN}Test quality gate passed.${NC}" + exit 0 +fi + +echo -e "${RED}Test quality gate failed with $FAILURES issue(s).${NC}" +exit 1 diff --git a/runners-conversion/periodVault/verify.sh b/runners-conversion/periodVault/verify.sh new file mode 100755 index 0000000..c2613b8 --- /dev/null +++ b/runners-conversion/periodVault/verify.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +set -euo pipefail + +GRADLEW="./gradlew" + +if [[ ! -x "$GRADLEW" ]]; then + echo "Missing or non-executable ./gradlew. Did you generate the Gradle wrapper?" + exit 1 +fi + +# Get the task list once (quiet output to reduce noise) +ALL_TASKS="$($GRADLEW -q tasks --all || true)" + +if [[ -z "$ALL_TASKS" ]]; then + echo "Could not read Gradle tasks. Exiting." 
+  exit 1
+fi
+
+# Prefer KMP aggregate task when available
+if echo "$ALL_TASKS" | grep -qE '^allTests[[:space:]]+-'; then
+  TASKS="allTests"
+else
+  # Fallback: collect common test tasks, excluding device-dependent
+  # instrumentation tests. `grep -v 'AndroidTest'` already filters
+  # connectedAndroidTest / deviceAndroidTest (substring match), so those
+  # extra filters were redundant. The trailing `|| true` stops
+  # `set -o pipefail` from silently killing the script when grep matches
+  # nothing — the empty-TASKS check below must get the chance to report it.
+  TASKS="$(
+    echo "$ALL_TASKS" \
+      | awk '{print $1}' \
+      | grep -E '(^test$|Test$|^check$)' \
+      | grep -v 'AndroidTest' \
+      | sort -u \
+      | tr '\n' ' ' \
+      || true
+  )"
+fi
+
+# Strip spaces and validate
+if [[ -z "${TASKS// /}" ]]; then
+  echo "No test tasks found. Exiting."
+  exit 1
+fi
+
+echo "Running: $GRADLEW $TASKS"
+# Run all tasks in one go (faster, simpler). TASKS is deliberately unquoted so
+# the space-separated task names split into separate arguments.
+# shellcheck disable=SC2086
+$GRADLEW $TASKS
+
+echo "==================="
+echo "ALL TESTS PASSED!"
+echo "==================="
+
+# --- Commit, push, and create PR if on a feature branch ---
+
+# Skip commit/push/PR when invoked from a git hook (prevents infinite loop)
+if [[ "${GIT_PUSH_IN_PROGRESS:-}" == "1" ]] || [[ -n "${GIT_DIR:-}" && "${GIT_DIR}" != ".git" ]]; then
+  exit 0
+fi
+
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
+MAIN_BRANCH="main"
+
+if [[ "$BRANCH" == "$MAIN_BRANCH" ]]; then
+  echo "On $MAIN_BRANCH — skipping commit/push/PR."
+  exit 0
+fi
+
+# Stage all changes (except untracked files the user hasn't added)
+if git diff --quiet && git diff --cached --quiet && [[ -z "$(git ls-files --others --exclude-standard)" ]]; then
+  echo "No changes to commit."
+else
+  git add -A
+  COMMIT_MSG="feat: $(echo "$BRANCH" | sed 's/^[0-9]*-//' | tr '-' ' ')"
+  git commit -m "$COMMIT_MSG" || echo "Nothing to commit."
+fi + +# Push branch to remote (skip hooks to avoid re-triggering verify.sh) +GIT_PUSH_IN_PROGRESS=1 git push --no-verify -u origin "$BRANCH" + +# Create PR if one doesn't already exist for this branch +if gh pr view "$BRANCH" --json state >/dev/null 2>&1; then + PR_URL="$(gh pr view "$BRANCH" --json url -q '.url')" + echo "PR already exists: $PR_URL" +else + TITLE="$(echo "$BRANCH" | sed 's/^[0-9]*-//' | tr '-' ' ' | awk '{for(i=1;i<=NF;i++) $i=toupper(substr($i,1,1)) substr($i,2)}1')" + PR_URL="$(gh pr create --title "$TITLE" --body "Automated PR from verify.sh" --base "$MAIN_BRANCH" --head "$BRANCH")" + echo "PR created: $PR_URL" +fi \ No newline at end of file diff --git a/setup/nginx-to-caddy/Caddyfile.recommended b/setup/nginx-to-caddy/Caddyfile.recommended index 62f359e..40dc810 100644 --- a/setup/nginx-to-caddy/Caddyfile.recommended +++ b/setup/nginx-to-caddy/Caddyfile.recommended @@ -51,6 +51,25 @@ ai.sintheus.com { } } +getter.sintheus.com { + import common_security + + reverse_proxy http://192.168.1.3:8181 { + import proxy_headers + } +} + +portainer.sintheus.com { + import common_security + + reverse_proxy https://192.168.1.181:9443 { + import proxy_headers + transport http { + tls_insecure_skip_verify + } + } +} + photos.sintheus.com { import common_security diff --git a/toggle_dns.sh b/setup/nginx-to-caddy/toggle_dns.sh similarity index 98% rename from toggle_dns.sh rename to setup/nginx-to-caddy/toggle_dns.sh index 775152a..bee19df 100755 --- a/toggle_dns.sh +++ b/setup/nginx-to-caddy/toggle_dns.sh @@ -5,7 +5,7 @@ set -euo pipefail # Usage: ./toggle_dns.sh # Requires sudo for networksetup. -PIHOLE="pi.sintheus.com" +PIHOLE="192.168.1.4" CLOUDFLARE="1.1.1.1" # Get all hardware network services (Wi-Fi, Ethernet, Thunderbolt, USB, etc.) 
diff --git a/setup/pi-monitoring/bootstrap_pi.sh b/setup/pi-monitoring/bootstrap_pi.sh index b91f864..ba5f2df 100755 --- a/setup/pi-monitoring/bootstrap_pi.sh +++ b/setup/pi-monitoring/bootstrap_pi.sh @@ -5,7 +5,7 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" # shellcheck source=./lib.sh source "$SCRIPT_DIR/lib.sh" -TIMEZONE="America/New_York" +TIMEZONE="America/Chicago" SSH_PORT="22" AUTO_YES=false ENABLE_UFW=true @@ -17,14 +17,14 @@ Usage: $(basename "$0") [options] Prepare a brand-new Raspberry Pi OS host for monitoring stack workloads. Options: - --timezone=ZONE Set system timezone (default: America/New_York) + --timezone=ZONE Set system timezone (default: America/Chicago) --ssh-port=PORT SSH port allowed by firewall (default: 22) --skip-firewall Skip UFW configuration --yes, -y Non-interactive; skip confirmation prompts --help, -h Show help Example: - $(basename "$0") --timezone=America/New_York --yes + $(basename "$0") --timezone=America/Chicago --yes USAGE } @@ -39,6 +39,19 @@ for arg in "$@"; do esac done +# Validate --ssh-port (must be 1-65535) before we risk enabling UFW with a bad rule +if ! [[ "$SSH_PORT" =~ ^[0-9]+$ ]] || [[ "$SSH_PORT" -lt 1 ]] || [[ "$SSH_PORT" -gt 65535 ]]; then + log_error "--ssh-port must be a number between 1 and 65535 (got: '$SSH_PORT')" + exit 1 +fi + +# Validate --timezone against timedatectl's known list +if ! timedatectl list-timezones 2>/dev/null | grep -qx "$TIMEZONE"; then + log_error "Unknown timezone: '$TIMEZONE'" + log_error "Run 'timedatectl list-timezones' for valid options" + exit 1 +fi + require_cmd sudo apt systemctl timedatectl curl if ! confirm_action "This will install/update OS packages and Docker on this Pi. Continue?" "$AUTO_YES"; then @@ -85,6 +98,10 @@ sudo systemctl enable --now docker log_info "Configuring Docker daemon defaults..." 
sudo mkdir -p /etc/docker +if [[ -f /etc/docker/daemon.json ]]; then + sudo cp /etc/docker/daemon.json /etc/docker/daemon.json.bak + log_info "Backed up existing daemon.json to daemon.json.bak" +fi sudo tee /etc/docker/daemon.json >/dev/null <<'JSON' { "log-driver": "json-file", @@ -119,5 +136,5 @@ fi log_success "Bootstrap complete" log_info "Recommended next steps:" log_info "1) Re-login to apply docker group membership" -log_info "2) Run setup/pi-monitoring/mount_ssd.sh" +log_info "2) (Optional) Run setup/pi-monitoring/mount_ssd.sh if you have an SSD" log_info "3) Copy stack.env.example to stack.env and run deploy_stack.sh" diff --git a/setup/pi-monitoring/docker-compose.yml b/setup/pi-monitoring/docker-compose.yml index d9ce783..9fd181e 100644 --- a/setup/pi-monitoring/docker-compose.yml +++ b/setup/pi-monitoring/docker-compose.yml @@ -40,7 +40,9 @@ services: command: - '--config.file=/etc/prometheus/prometheus.yml' - '--storage.tsdb.path=/prometheus' - - '--storage.tsdb.retention.time=30d' + - '--storage.tsdb.retention.time=${PROMETHEUS_RETENTION_TIME:-15d}' + - '--storage.tsdb.retention.size=${PROMETHEUS_RETENTION_SIZE:-2GB}' + - '--storage.tsdb.wal-compression' - '--web.enable-lifecycle' ports: - "${BIND_IP:-0.0.0.0}:${PROMETHEUS_PORT:-9090}:9090" diff --git a/setup/pi-monitoring/portainer-agent.sh b/setup/pi-monitoring/portainer-agent.sh new file mode 100644 index 0000000..2344fdb --- /dev/null +++ b/setup/pi-monitoring/portainer-agent.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + set -euo pipefail + + docker run -d \ + -p 9001:9001 \ + --name portainer_agent \ + --restart=always \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v /var/lib/docker/volumes:/var/lib/docker/volumes \ + -v /:/host \ + portainer/agent:2.39.0 \ No newline at end of file diff --git a/setup/pi-monitoring/stack.env.example b/setup/pi-monitoring/stack.env.example index 4d3fa0c..b27d183 100644 --- a/setup/pi-monitoring/stack.env.example +++ b/setup/pi-monitoring/stack.env.example @@ 
-10,7 +10,7 @@ COMPOSE_PROJECT_NAME=pi-monitoring OPS_ROOT=/srv/ops # Host timezone for containers -TZ=America/New_York +TZ=America/Chicago # Bind IP for published ports (0.0.0.0 = all interfaces) BIND_IP=0.0.0.0 @@ -34,6 +34,11 @@ UPTIME_KUMA_IMAGE=louislam/uptime-kuma:1 GRAFANA_ADMIN_USER=admin GRAFANA_ADMIN_PASSWORD=replace-with-strong-password +# Prometheus retention (whichever limit is hit first wins) +# Reduce these on microSD to prevent filling the card. +PROMETHEUS_RETENTION_TIME=15d +PROMETHEUS_RETENTION_SIZE=2GB + # Optional comma-separated plugin list for Grafana # Example: grafana-piechart-panel,grafana-clock-panel GRAFANA_PLUGINS=