- Phase 10: local repo cutover (rename origin→github, add Gitea remote, push branches/tags)
- Phase 11: custom runner infrastructure with toolchain-based naming (go-node-runner, jvm-android-runner) and repo variables via Gitea API
- Add container_options support to manage_runner.sh for KVM passthrough
- Phase 8: add --allow-direct-checks flag for LAN/split-DNS staging
- Phase 7.5: add Cloudflare TLS block, retry logic for probes, multi-upstream support
- Add toggle_dns.sh helper and update orchestration scripts for phases 10-11

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
619 lines
23 KiB
Bash
Executable File
619 lines
23 KiB
Bash
Executable File
#!/usr/bin/env bash
set -euo pipefail

# =============================================================================
# phase8_cutover.sh — HTTPS via Caddy + Mark GitHub repos as mirrors
# Depends on: All prior phases complete, required Docker network present on Unraid
# This is the "go live" script — after this, Gitea is the primary git host.
#
# Caddy handles TLS automatically. TLS_MODE from .env controls how:
#   "cloudflare" → DNS-01 via Cloudflare API (wildcard cert)
#   "existing"   → user provides cert/key paths
#
# After HTTPS is live, GitHub repos are marked as offsite mirrors.
# =============================================================================

# Resolve the directory this script lives in so lib/ resolves from any CWD.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# NOTE(review): common.sh is assumed to provide log_*, load_env, require_vars,
# validate_ip, validate_bool, ssh_exec, scp_to, render_template, github_api,
# phase_header and _project_root — confirm against lib/common.sh.
source "${SCRIPT_DIR}/lib/common.sh"

# Default: demand public HTTPS reachability; --allow-direct-checks relaxes it.
ALLOW_DIRECT_CHECKS=false
|
|
# Print command-line help for this script to stdout.
usage() {
  local self
  self=$(basename "$0")
  cat <<EOF
Usage: ${self} [options]

Options:
  --allow-direct-checks   Allow fallback to direct Caddy-IP checks via --resolve
                          (LAN/split-DNS staging mode; not a full public cutover)
  --help, -h              Show this help
EOF
}
|
|
|
# ---- CLI parsing -----------------------------------------------------------
while [[ $# -gt 0 ]]; do
  case "$1" in
    --allow-direct-checks) ALLOW_DIRECT_CHECKS=true ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      log_error "Unknown argument: $1"
      usage
      exit 1
      ;;
  esac
  shift
done

# ---- Environment -----------------------------------------------------------
load_env
require_vars UNRAID_IP UNRAID_SSH_USER UNRAID_GITEA_IP UNRAID_CADDY_IP \
  UNRAID_COMPOSE_DIR \
  GITEA_INTERNAL_URL GITEA_DOMAIN GITEA_ADMIN_TOKEN \
  GITEA_ORG_NAME TLS_MODE CADDY_DOMAIN CADDY_DATA_PATH \
  GITHUB_USERNAME GITHUB_TOKEN \
  REPO_NAMES

# Each TLS mode pulls in its own extra required variables.
case "$TLS_MODE" in
  cloudflare) require_vars CLOUDFLARE_API_TOKEN PUBLIC_DNS_TARGET_IP ;;
  existing)   require_vars SSL_CERT_PATH SSL_KEY_PATH ;;
  *)
    log_error "Invalid TLS_MODE='${TLS_MODE}' — must be 'cloudflare' or 'existing'"
    exit 1
    ;;
esac

phase_header 8 "Cutover (HTTPS via Caddy + Mark GitHub Mirrors)"

# Whitespace-separated repo list from .env.
read -ra REPOS <<< "$REPO_NAMES"
PHASE8_STATE_DIR="$(_project_root)/.manifests"
PHASE8_STATE_FILE="${PHASE8_STATE_DIR}/phase8_github_repo_state.json"
# Reuse Unraid's existing Docker network.
UNRAID_DOCKER_NETWORK_NAME="br0"
# Compose files live in a centralized project directory.
CADDY_COMPOSE_DIR="${UNRAID_COMPOSE_DIR}/caddy"
# Sentinels wrapping the managed Caddyfile route so reruns can replace it.
PHASE8_GITEA_ROUTE_BEGIN="# BEGIN_PHASE8_GITEA_ROUTE"
PHASE8_GITEA_ROUTE_END="# END_PHASE8_GITEA_ROUTE"
PUBLIC_DNS_TARGET_IP="${PUBLIC_DNS_TARGET_IP:-}"
PHASE8_ALLOW_PRIVATE_DNS_TARGET="${PHASE8_ALLOW_PRIVATE_DNS_TARGET:-false}"

if ! validate_bool "${PHASE8_ALLOW_PRIVATE_DNS_TARGET}"; then
  log_error "Invalid PHASE8_ALLOW_PRIVATE_DNS_TARGET='${PHASE8_ALLOW_PRIVATE_DNS_TARGET}' (must be true or false)"
  exit 1
fi
|
# Poll https://<host>/api/v1/version through normal DNS resolution until it
# answers, or until max_secs (default 30) elapses. Returns 0 on success,
# 1 on timeout. Probes every 2 seconds.
wait_for_https_public() {
  local host="$1" max_secs="${2:-30}"
  local waited=0
  while (( waited < max_secs )); do
    if curl -sf -o /dev/null "https://${host}/api/v1/version" 2>/dev/null; then
      return 0
    fi
    sleep 2
    waited=$(( waited + 2 ))
  done
  return 1
}
|
|
|
# Poll HTTPS with <host>:443 pinned to a specific IP (bypassing DNS) — used
# for LAN/split-DNS staging where public DNS does not yet point at Caddy.
# -k is used because the cert may not be trusted yet during provisioning.
# On timeout, dump recent Caddy container logs as a debugging aid.
#   $1 - hostname, $2 - Caddy IP, $3 - timeout seconds (default 300)
wait_for_https_via_resolve() {
  local host="$1" ip="$2" max_secs="${3:-300}"
  local waited=0
  log_info "Waiting for HTTPS via direct Caddy path (--resolve ${host}:443:${ip})..."
  while (( waited < max_secs )); do
    if curl -skf --resolve "${host}:443:${ip}" "https://${host}/api/v1/version" >/dev/null 2>&1; then
      log_success "HTTPS reachable via Caddy IP (after ${waited}s)"
      return 0
    fi
    sleep 2
    waited=$(( waited + 2 ))
  done
  log_error "Timeout waiting for HTTPS via --resolve (${host} -> ${ip}) after ${max_secs}s"
  # Best-effort diagnostics — only when a container named exactly 'caddy' is up.
  if ssh_exec UNRAID "docker ps --format '{{.Names}}' | grep -qx 'caddy'" >/dev/null 2>&1; then
    log_warn "Recent Caddy logs (tail 80):"
    ssh_exec UNRAID "docker logs --tail 80 caddy 2>&1" || true
  fi
  return 1
}
|
|
# Verify the Gitea API answers on its backend port from the Unraid host.
# The remote curl prints only the HTTP status code; all non-digits are
# stripped and only the trailing three digits kept, so SSH banner noise
# cannot corrupt the code. Returns 0 on HTTP 200, 1 otherwise.
check_unraid_gitea_backend() {
  local response status
  response=$(ssh_exec UNRAID "curl -sS -o /dev/null -w '%{http_code}' 'http://${UNRAID_GITEA_IP}:3000/api/v1/version' || true" 2>/dev/null || true)
  status=$(printf '%s' "$response" | tr -cd '0-9')
  if [[ -z "$status" ]]; then
    status="000"
  elif (( ${#status} > 3 )); then
    status="${status: -3}"   # keep only the trailing HTTP code
  fi

  if [[ "$status" == "200" ]]; then
    log_success "Unraid -> Gitea backend API reachable (HTTP 200)"
    return 0
  fi

  log_error "Unraid -> Gitea backend API check failed (HTTP ${status}) at http://${UNRAID_GITEA_IP}:3000/api/v1/version"
  return 1
}
|
|
# True when the dotted-quad string begins with an RFC 1918 private prefix
# (10/8, 192.168/16, 172.16/12). Matches only the prefix; the caller is
# expected to have validated the full address already.
is_private_ipv4() {
  local addr="$1"
  [[ "$addr" =~ ^(10\.|192\.168\.|172\.(1[6-9]|2[0-9]|3[01])\.) ]]
}
|
|
# Thin wrapper over the Cloudflare v4 REST API.
#   $1 - HTTP method, $2 - API path (starting with /), $3 - optional JSON body
# Reads CLOUDFLARE_API_TOKEN; emits the raw JSON response on stdout.
cloudflare_api_call() {
  local method="$1" path="$2" body="${3:-}"
  set -- curl -sS \
    -X "$method" \
    -H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}" \
    -H "Content-Type: application/json" \
    "https://api.cloudflare.com/client/v4${path}"
  if [[ -n "$body" ]]; then
    "$@" -d "$body"
  else
    "$@"
  fi
}
|
|
# Summarize a Cloudflare API response's "errors" array as "; "-joined messages.
#   $1 - raw JSON response
_cf_errors() {
  jq -r '(.errors // []) | map(.message // tostring) | join("; ")' <<< "$1"
}

# Build the JSON body for an unproxied A record with TTL 120.
#   $1 - record name (FQDN), $2 - IPv4 content
_cf_a_payload() {
  jq -n \
    --arg type "A" \
    --arg name "$1" \
    --arg content "$2" \
    --argjson ttl 120 \
    --argjson proxied false \
    '{type:$type, name:$name, content:$content, ttl:$ttl, proxied:$proxied}'
}

# Ensure an A record <host> -> <target_ip> in the Cloudflare zone derived from
# host (everything after the first label, e.g. git.example.com -> example.com).
# Creates the record when absent, updates it when wrong, no-ops when correct.
# Refuses RFC 1918 targets unless PHASE8_ALLOW_PRIVATE_DNS_TARGET=true.
#   $1 - FQDN to manage, $2 - target IPv4 address
# Returns 0 on success, 1 on any validation or API failure.
ensure_cloudflare_dns_for_gitea() {
  local host="$1" target_ip="$2" zone_id zone_name err
  local allow_private="${PHASE8_ALLOW_PRIVATE_DNS_TARGET}"

  if [[ -z "$target_ip" ]]; then
    log_error "PUBLIC_DNS_TARGET_IP is not set"
    log_error "Set PUBLIC_DNS_TARGET_IP to your public ingress IP for ${host}"
    log_error "For LAN-only/split-DNS use, also set PHASE8_ALLOW_PRIVATE_DNS_TARGET=true"
    return 1
  fi

  if ! validate_ip "$target_ip"; then
    log_error "Invalid PUBLIC_DNS_TARGET_IP='${target_ip}'"
    log_error "Set PUBLIC_DNS_TARGET_IP in .env to the IP that should answer ${host}"
    return 1
  fi

  # Zone = host minus its first label; a bare single-label host cannot work.
  zone_name="${host#*.}"
  if [[ "$zone_name" == "$host" ]]; then
    log_error "GITEA_DOMAIN='${host}' is not a valid FQDN for Cloudflare zone detection"
    return 1
  fi

  if is_private_ipv4 "$target_ip"; then
    if [[ "$allow_private" != "true" ]]; then
      log_error "Refusing private DNS target ${target_ip} for Cloudflare public DNS"
      log_error "Set PUBLIC_DNS_TARGET_IP to public ingress IP, or set PHASE8_ALLOW_PRIVATE_DNS_TARGET=true for LAN-only split-DNS"
      return 1
    fi
    log_warn "Using private DNS target ${target_ip} because PHASE8_ALLOW_PRIVATE_DNS_TARGET=true"
  fi

  # Resolve zone id.
  local zone_resp
  zone_resp=$(cloudflare_api_call GET "/zones?name=${zone_name}&status=active")
  if [[ "$(jq -r '.success // false' <<< "$zone_resp")" != "true" ]]; then
    err=$(_cf_errors "$zone_resp")
    log_error "Cloudflare zone lookup failed for ${zone_name}: ${err:-unknown error}"
    return 1
  fi

  zone_id=$(jq -r '.result[0].id // empty' <<< "$zone_resp")
  if [[ -z "$zone_id" ]]; then
    log_error "Cloudflare zone not found or not accessible for ${zone_name}"
    return 1
  fi

  # Look up any existing A record for the host.
  local record_resp record_count record_id old_ip
  record_resp=$(cloudflare_api_call GET "/zones/${zone_id}/dns_records?type=A&name=${host}")
  if [[ "$(jq -r '.success // false' <<< "$record_resp")" != "true" ]]; then
    err=$(_cf_errors "$record_resp")
    log_error "Cloudflare DNS query failed for ${host}: ${err:-unknown error}"
    return 1
  fi

  record_count=$(jq -r '.result | length' <<< "$record_resp")
  if [[ "$record_count" -eq 0 ]]; then
    # No record yet — create one.
    local create_resp
    create_resp=$(cloudflare_api_call POST "/zones/${zone_id}/dns_records" "$(_cf_a_payload "$host" "$target_ip")")
    if [[ "$(jq -r '.success // false' <<< "$create_resp")" != "true" ]]; then
      err=$(_cf_errors "$create_resp")
      log_error "Failed to create Cloudflare A record ${host} -> ${target_ip}: ${err:-unknown error}"
      return 1
    fi
    log_success "Created Cloudflare A record: ${host} -> ${target_ip}"
    return 0
  fi

  record_id=$(jq -r '.result[0].id // empty' <<< "$record_resp")
  old_ip=$(jq -r '.result[0].content // empty' <<< "$record_resp")
  if [[ -n "$old_ip" && "$old_ip" == "$target_ip" ]]; then
    log_info "Cloudflare A record already correct: ${host} -> ${target_ip}"
    return 0
  fi

  # Record exists but points elsewhere — update it in place.
  local update_resp
  update_resp=$(cloudflare_api_call PUT "/zones/${zone_id}/dns_records/${record_id}" "$(_cf_a_payload "$host" "$target_ip")")
  if [[ "$(jq -r '.success // false' <<< "$update_resp")" != "true" ]]; then
    err=$(_cf_errors "$update_resp")
    log_error "Failed to update Cloudflare A record ${host}: ${err:-unknown error}"
    return 1
  fi

  log_info "Updated Cloudflare A record: ${host}"
  log_info "  old: ${old_ip:-<empty>}"
  log_info "  new: ${target_ip}"
  return 0
}
|
|
# Return 0 when the Caddyfile contains a site block whose address list
# matches <domain> — either exactly, or via a wildcard label
# (*.example.com covers sub.example.com but NOT the apex example.com).
# Comment lines and named snippets "(name) { ... }" are ignored.
#   $1 - path to Caddyfile, $2 - domain to look for
caddyfile_has_domain_block() {
  local file="$1" domain="$2"
  awk -v want="$domain" '
    function strip_ws(s) {
      sub(/^[[:space:]]+/, "", s)
      sub(/[[:space:]]+$/, "", s)
      return s
    }
    function label_hits(label, dom,    suffix, firstdot) {
      if (label == dom) return 1
      # Wildcard match: *.example.com covers sub.example.com
      if (substr(label, 1, 2) == "*.") {
        suffix = substr(label, 2)
        firstdot = index(dom, ".")
        if (firstdot > 0 && substr(dom, firstdot) == suffix) return 1
      }
      return 0
    }
    {
      if ($0 ~ /^[[:space:]]*#/) next
      brace = index($0, "{")
      if (brace <= 0) next

      addr = strip_ws(substr($0, 1, brace - 1))
      if (addr == "" || addr ~ /^\(/) next

      gsub(/[[:space:]]+/, "", addr)
      n = split(addr, site, ",")
      for (i = 1; i <= n; i++) {
        if (label_hits(site[i], want)) hit = 1
      }
    }
    END {
      exit(hit ? 0 : 1)
    }
  ' "$file"
}
|
|
# ---------------------------------------------------------------------------
# Helper: persist original GitHub repo settings for teardown symmetry
# ---------------------------------------------------------------------------
# Create the state directory and seed the state file with an empty JSON
# object. An existing state file is left untouched (idempotent).
init_phase8_state_store() {
  mkdir -p "$PHASE8_STATE_DIR"
  [[ -f "$PHASE8_STATE_FILE" ]] || printf '{}\n' > "$PHASE8_STATE_FILE"
}
|
# Query the GitHub Pages configuration for <repo> and emit a normalized JSON
# fragment: pages_enabled, pages_cname, pages_source_branch, pages_source_path.
# Any non-200 response (including the 404 GitHub returns when Pages is
# disabled) yields the "disabled" defaults.
#   $1 - repository name; reads GITHUB_TOKEN and GITHUB_USERNAME
fetch_github_pages_state() {
  local repo="$1"
  local body status
  local enabled=false
  local cname=""
  local src_branch=""
  local src_path="/"

  body=$(mktemp)
  status=$(curl -s \
    -H "Authorization: token ${GITHUB_TOKEN}" \
    -H "Accept: application/json" \
    -o "$body" \
    -w "%{http_code}" \
    "https://api.github.com/repos/${GITHUB_USERNAME}/${repo}/pages" || echo "000")

  if [[ "$status" == "200" ]]; then
    enabled=true
    cname=$(jq -r '.cname // ""' "$body")
    src_branch=$(jq -r '.source.branch // ""' "$body")
    src_path=$(jq -r '.source.path // "/"' "$body")
  fi
  rm -f "$body"

  jq -n \
    --argjson pages_enabled "$enabled" \
    --arg pages_cname "$cname" \
    --arg pages_source_branch "$src_branch" \
    --arg pages_source_path "$src_path" \
    '{
      pages_enabled: $pages_enabled,
      pages_cname: $pages_cname,
      pages_source_branch: $pages_source_branch,
      pages_source_path: $pages_source_path
    }'
}
|
|
# Record <repo>'s current mutable GitHub settings (description, homepage,
# wiki/projects flags, Pages config) into the phase-8 state file so teardown
# can restore them exactly. First snapshot wins: an existing entry for the
# repo is never overwritten.
#   $1 - repository name
#   $2 - JSON repo metadata already fetched from the GitHub API
snapshot_repo_state() {
  local repo="$1" repo_json="$2"

  init_phase8_state_store

  if jq -e --arg repo "$repo" 'has($repo)' "$PHASE8_STATE_FILE" >/dev/null 2>&1; then
    log_info "State snapshot already exists for ${repo} — preserving original values"
    return 0
  fi

  local desc homepage wiki projects pages scratch
  desc=$(printf '%s' "$repo_json" | jq -r '.description // ""')
  homepage=$(printf '%s' "$repo_json" | jq -r '.homepage // ""')
  wiki=$(printf '%s' "$repo_json" | jq -r '.has_wiki // false')
  projects=$(printf '%s' "$repo_json" | jq -r '.has_projects // false')
  pages=$(fetch_github_pages_state "$repo")

  # Write via a temp file + mv so a failed jq never truncates the state file.
  scratch=$(mktemp)
  jq \
    --arg repo "$repo" \
    --arg description "$desc" \
    --arg homepage "$homepage" \
    --argjson has_wiki "$wiki" \
    --argjson has_projects "$projects" \
    --argjson pages "$pages" \
    '.[$repo] = {
      description: $description,
      homepage: $homepage,
      has_wiki: $has_wiki,
      has_projects: $has_projects
    } + $pages' \
    "$PHASE8_STATE_FILE" > "$scratch"
  mv "$scratch" "$PHASE8_STATE_FILE"

  log_info "Saved pre-cutover GitHub settings for ${repo}"
}
|
# ---------------------------------------------------------------------------
# Step 1: Create Caddy data directories
# ---------------------------------------------------------------------------
log_step 1 "Creating Caddy data directories on Unraid..."
if ! ssh_exec UNRAID "test -d '${CADDY_DATA_PATH}/data'"; then
  ssh_exec UNRAID "mkdir -p '${CADDY_DATA_PATH}/data' '${CADDY_DATA_PATH}/config'"
  log_success "Caddy data directories created"
else
  log_info "Caddy data directory already exists — skipping"
fi
|
# ---------------------------------------------------------------------------
# Step 2: Render + deploy Caddyfile
# Three cases: (a) remote Caddyfile already routes GITEA_DOMAIN — leave it;
# (b) Caddyfile exists without the route — append a sentinel-wrapped managed
# block (replacing any stale one); (c) no Caddyfile — render from template.
# ---------------------------------------------------------------------------
log_step 2 "Deploying Caddyfile..."
GITEA_CONTAINER_IP="${UNRAID_GITEA_IP}"
# Exported so render_template can substitute them into the Caddyfile template.
export GITEA_CONTAINER_IP GITEA_DOMAIN CADDY_DOMAIN

# Build TLS block based on TLS_MODE.
# NOTE(review): leading whitespace inside these multi-line strings appears to
# have been collapsed by extraction — Caddy ignores indentation, but confirm
# against the original file / templates/Caddyfile.tpl.
if [[ "$TLS_MODE" == "cloudflare" ]]; then
  # DNS-01 challenge via the Cloudflare plugin; token injected as env var.
  TLS_BLOCK=" tls {
dns cloudflare {env.CF_API_TOKEN}
}"
else
  # User-supplied certificate and key paths.
  TLS_BLOCK=" tls ${SSL_CERT_PATH} ${SSL_KEY_PATH}"
fi
export TLS_BLOCK

if ssh_exec UNRAID "test -f '${CADDY_DATA_PATH}/Caddyfile'" 2>/dev/null; then
  TMP_EXISTING=$(mktemp)
  TMP_UPDATED=$(mktemp)
  TMP_ROUTE_BLOCK=$(mktemp)

  # Pull the live Caddyfile down so it can be inspected/edited locally.
  ssh_exec UNRAID "cat '${CADDY_DATA_PATH}/Caddyfile'" > "$TMP_EXISTING"

  if caddyfile_has_domain_block "$TMP_EXISTING" "$GITEA_DOMAIN"; then
    log_info "Caddyfile already has a route for ${GITEA_DOMAIN} — preserving existing file"
  else
    log_warn "Caddyfile exists but has no explicit route for ${GITEA_DOMAIN}"
    log_info "Appending managed Gitea route block"
    # Sentinel-wrapped block lets a rerun find and replace it verbatim.
    {
      echo
      echo "${PHASE8_GITEA_ROUTE_BEGIN}"
      echo "${GITEA_DOMAIN} {"
      printf '%s\n' "$TLS_BLOCK"
      echo
      echo " reverse_proxy ${GITEA_CONTAINER_IP}:3000"
      echo "}"
      echo "${PHASE8_GITEA_ROUTE_END}"
      echo
    } > "$TMP_ROUTE_BLOCK"

    # Remove a stale managed block (if present), then append refreshed block.
    sed "/^${PHASE8_GITEA_ROUTE_BEGIN}\$/,/^${PHASE8_GITEA_ROUTE_END}\$/d" "$TMP_EXISTING" > "$TMP_UPDATED"
    cat "$TMP_UPDATED" "$TMP_ROUTE_BLOCK" > "${TMP_UPDATED}.final"
    scp_to UNRAID "${TMP_UPDATED}.final" "${CADDY_DATA_PATH}/Caddyfile"
    log_success "Appended managed Gitea route to existing Caddyfile"
  fi

  rm -f "$TMP_EXISTING" "$TMP_UPDATED" "$TMP_ROUTE_BLOCK" "${TMP_UPDATED}.final"
else
  # No remote Caddyfile yet — render the full template from scratch.
  TMPFILE=$(mktemp)

  render_template "${SCRIPT_DIR}/templates/Caddyfile.tpl" "$TMPFILE" \
    "\${CADDY_DOMAIN} \${GITEA_DOMAIN} \${TLS_BLOCK} \${GITEA_CONTAINER_IP}"
  scp_to UNRAID "$TMPFILE" "${CADDY_DATA_PATH}/Caddyfile"
  rm -f "$TMPFILE"
  log_success "Caddyfile deployed"
fi
|
|
# ---------------------------------------------------------------------------
# Step 3: Render + deploy Caddy docker-compose
# Skipped entirely if the compose file already exists on the remote host.
# ---------------------------------------------------------------------------
log_step 3 "Deploying Caddy docker-compose..."
if ssh_exec UNRAID "test -f '${CADDY_COMPOSE_DIR}/docker-compose.yml'" 2>/dev/null; then
  log_info "Caddy docker-compose.yml already exists — skipping"
else
  # Hard prerequisite: the Docker network must already exist on Unraid —
  # this script never creates networks itself.
  if ! ssh_exec UNRAID "docker network inspect '${UNRAID_DOCKER_NETWORK_NAME}'" &>/dev/null; then
    log_error "Required Docker network '${UNRAID_DOCKER_NETWORK_NAME}' not found on Unraid."
    log_error "Create it in Unraid first or update phase8_cutover.sh to match your network name."
    exit 1
  fi
  ssh_exec UNRAID "mkdir -p '${CADDY_COMPOSE_DIR}'"
  TMPFILE=$(mktemp)
  CADDY_CONTAINER_IP="${UNRAID_CADDY_IP}"
  GITEA_NETWORK_NAME="${UNRAID_DOCKER_NETWORK_NAME}"
  # Exported for render_template's variable substitution.
  export CADDY_CONTAINER_IP CADDY_DATA_PATH GITEA_NETWORK_NAME

  # NOTE(review): leading whitespace inside these YAML fragments appears to
  # have been collapsed by extraction — YAML indentation is significant, so
  # confirm against templates/docker-compose-caddy.yml.tpl.
  if [[ "$TLS_MODE" == "cloudflare" ]]; then
    # Cloudflare DNS plugin needs the API token in the container environment.
    CADDY_ENV_VARS=" - CF_API_TOKEN=${CLOUDFLARE_API_TOKEN}"
    CADDY_EXTRA_VOLUMES=""
  else
    CADDY_ENV_VARS=""
    # Mount cert/key files into the container (read-only, same path inside).
    CADDY_EXTRA_VOLUMES=" - ${SSL_CERT_PATH}:${SSL_CERT_PATH}:ro
- ${SSL_KEY_PATH}:${SSL_KEY_PATH}:ro"
  fi
  export CADDY_ENV_VARS CADDY_EXTRA_VOLUMES

  render_template "${SCRIPT_DIR}/templates/docker-compose-caddy.yml.tpl" "$TMPFILE" \
    "\${CADDY_DATA_PATH} \${CADDY_CONTAINER_IP} \${CADDY_ENV_VARS} \${CADDY_EXTRA_VOLUMES} \${GITEA_NETWORK_NAME}"
  # Strip empty YAML blocks left when optional vars are blank
  if [[ -z "$CADDY_ENV_VARS" ]]; then
    # An "environment:" key with no children is invalid YAML — drop it.
    sed -i.bak '/^[[:space:]]*environment:$/d' "$TMPFILE"
    rm -f "${TMPFILE}.bak"
  fi
  if [[ -z "$CADDY_EXTRA_VOLUMES" ]]; then
    # Remove trailing blank lines after the volumes block
    # (sed label/loop idiom; the -i.bak form works on both GNU and BSD sed).
    sed -i.bak -e :a -e '/^\n*$/{$d;N;ba' -e '}' "$TMPFILE"
    rm -f "${TMPFILE}.bak"
  fi
  scp_to UNRAID "$TMPFILE" "${CADDY_COMPOSE_DIR}/docker-compose.yml"
  rm -f "$TMPFILE"
  log_success "Caddy docker-compose.yml deployed to ${CADDY_COMPOSE_DIR}"
fi
|
# ---------------------------------------------------------------------------
# Step 4: Start Caddy container
# Running container → reload config in place (restart as fallback);
# otherwise bring the compose stack up, then reload once it is up.
# ---------------------------------------------------------------------------
log_step 4 "Starting Caddy container..."
CONTAINER_STATUS=$(ssh_exec UNRAID "docker ps --filter name=caddy --format '{{.Status}}'" 2>/dev/null || true)
if [[ "$CONTAINER_STATUS" != *"Up"* ]]; then
  # 'docker compose' (v2) preferred; fall back to legacy docker-compose.
  ssh_exec UNRAID "cd '${CADDY_COMPOSE_DIR}' && docker compose up -d 2>/dev/null || docker-compose up -d"
  if ssh_exec UNRAID "docker exec caddy caddy reload --config /etc/caddy/Caddyfile --adapter caddyfile" >/dev/null 2>&1; then
    log_success "Caddy container started and config loaded"
  else
    log_success "Caddy container started"
  fi
else
  log_info "Caddy container already running"
  log_info "Reloading Caddy config from /etc/caddy/Caddyfile"
  if ssh_exec UNRAID "docker exec caddy caddy reload --config /etc/caddy/Caddyfile --adapter caddyfile" >/dev/null 2>&1; then
    log_success "Caddy config reloaded"
  else
    log_warn "Caddy reload failed; restarting caddy container"
    ssh_exec UNRAID "docker restart caddy >/dev/null"
    log_success "Caddy container restarted"
  fi
fi
|
|
# ---------------------------------------------------------------------------
# Step 5: Ensure DNS points Gitea domain to target ingress IP
# Only automated for the Cloudflare TLS mode; otherwise DNS is the
# operator's responsibility.
# ---------------------------------------------------------------------------
log_step 5 "Ensuring DNS for ${GITEA_DOMAIN}..."
case "$TLS_MODE" in
  cloudflare)
    ensure_cloudflare_dns_for_gitea "${GITEA_DOMAIN}" "${PUBLIC_DNS_TARGET_IP}"
    ;;
  *)
    log_info "TLS_MODE=${TLS_MODE}; skipping Cloudflare DNS automation"
    ;;
esac
|
# ---------------------------------------------------------------------------
# Step 6: Wait for HTTPS to work
# Caddy auto-obtains certs — poll until HTTPS responds. If public routing is
# not ready, either fall back to a direct --resolve check (opt-in staging
# mode) or abort the cutover. (set -e aborts if the --resolve probe fails.)
# ---------------------------------------------------------------------------
log_step 6 "Waiting for HTTPS (Caddy auto-provisions cert)..."
check_unraid_gitea_backend
if ! wait_for_https_public "${GITEA_DOMAIN}" 60; then
  log_warn "Public-domain routing to Caddy is not ready yet"
  if [[ "$ALLOW_DIRECT_CHECKS" != "true" ]]; then
    log_error "Refusing to continue cutover without public HTTPS reachability"
    log_error "Fix DNS/ingress routing and rerun Phase 8, or use --allow-direct-checks for staging only"
    exit 1
  fi
  wait_for_https_via_resolve "${GITEA_DOMAIN}" "${UNRAID_CADDY_IP}" 300
  log_warn "Proceeding with direct-only HTTPS validation (--allow-direct-checks)"
else
  log_success "HTTPS verified through current domain routing — https://${GITEA_DOMAIN} works"
fi
|
|
# ---------------------------------------------------------------------------
# Step 7: Mark GitHub repos as offsite backup only
# Updates description + homepage to indicate Gitea is primary.
# Disables wiki and Pages to avoid unnecessary resource usage.
# Does NOT archive — archived repos reject pushes, which would break
# the push mirrors configured in Phase 6.
# Persists original mutable settings to a local state file for teardown.
# GitHub Actions already disabled in Phase 6 Step D.
# ---------------------------------------------------------------------------
log_step 7 "Marking GitHub repos as offsite backup..."

init_phase8_state_store
GITHUB_REPO_UPDATE_FAILURES=0
for repo in "${REPOS[@]}"; do
  # Fetch repo metadata (single API call). A failed or empty response must
  # NOT be treated as real state: snapshotting it would make teardown
  # "restore" blank settings over the repo's actual ones.
  if ! REPO_DATA=$(github_api GET "/repos/${GITHUB_USERNAME}/${repo}" 2>/dev/null) \
      || [[ -z "$REPO_DATA" ]] \
      || ! printf '%s' "$REPO_DATA" | jq -e 'has("name")' >/dev/null 2>&1; then
    log_error "Failed to fetch GitHub repo metadata: ${repo} — skipping"
    GITHUB_REPO_UPDATE_FAILURES=$((GITHUB_REPO_UPDATE_FAILURES + 1))
    continue
  fi
  CURRENT_DESC=$(printf '%s' "$REPO_DATA" | jq -r '.description // ""')

  # Skip if already marked (idempotent rerun), but warn when the local
  # snapshot is missing — teardown cannot restore what was never saved.
  if [[ "$CURRENT_DESC" == "[MIRROR]"* ]]; then
    if ! jq -e --arg repo "$repo" 'has($repo)' "$PHASE8_STATE_FILE" >/dev/null 2>&1; then
      log_warn "GitHub repo ${repo} already marked as mirror but no local state snapshot exists"
      log_warn " → Teardown may not fully restore pre-cutover settings for this repo"
    fi
    log_info "GitHub repo ${repo} already marked as mirror — skipping"
    continue
  fi

  # Snapshot current mutable state so teardown can restore exactly.
  snapshot_repo_state "$repo" "$REPO_DATA"

  # Build new description preserving original
  NEW_DESC="[MIRROR] Offsite backup — primary at https://${GITEA_DOMAIN}/${GITEA_ORG_NAME}/${repo}"
  if [[ -n "$CURRENT_DESC" ]]; then
    NEW_DESC="${NEW_DESC} — was: ${CURRENT_DESC}"
  fi

  # Update description + homepage, disable wiki and projects
  UPDATE_PAYLOAD=$(jq -n \
    --arg description "$NEW_DESC" \
    --arg homepage "https://${GITEA_DOMAIN}/${GITEA_ORG_NAME}/${repo}" \
    '{description: $description, homepage: $homepage, has_wiki: false, has_projects: false}')

  if PATCH_OUT=$(github_api PATCH "/repos/${GITHUB_USERNAME}/${repo}" "$UPDATE_PAYLOAD" 2>&1); then
    log_success "Marked GitHub repo as mirror: ${repo}"
  else
    log_error "Failed to update GitHub repo: ${repo}"
    log_error "GitHub API: $(printf '%s' "$PATCH_OUT" | tail -n 1)"
    GITHUB_REPO_UPDATE_FAILURES=$((GITHUB_REPO_UPDATE_FAILURES + 1))
  fi

  # Disable GitHub Pages if enabled (Pages can incur bandwidth costs).
  # Best-effort: a 404 ("Pages not enabled") is expected and ignored.
  github_api DELETE "/repos/${GITHUB_USERNAME}/${repo}/pages" >/dev/null 2>&1 || true
done

# ---------------------------------------------------------------------------
# Summary — any repo fetch/update failure fails the phase so it gets rerun.
# ---------------------------------------------------------------------------
printf '\n'
if [[ "$GITHUB_REPO_UPDATE_FAILURES" -gt 0 ]]; then
  log_error "Phase 8 failed: ${GITHUB_REPO_UPDATE_FAILURES} GitHub repo update(s) failed"
  exit 1
fi
log_success "Phase 8 complete — Gitea is live at https://${GITEA_DOMAIN}"
log_info "GitHub repos marked as offsite backup. Push mirrors remain active."