# Files
# ente-cloudron/start.sh
# 2025-11-20 11:54:40 -06:00
#
# 1388 lines
# 45 KiB
# Bash
# Executable File
#
#!/bin/bash
# Strict mode: abort on command failure (-e), unset variables (-u), and
# failures in any stage of a pipeline (pipefail).
set -euo pipefail
# log LEVEL MESSAGE... — emit a timestamped "[ts] [LEVEL] message" line on stdout.
log() {
  local lvl="$1"
  shift
  local ts
  ts="$(date '+%Y-%m-%d %H:%M:%S')"
  echo "[$ts] [$lvl] $*"
}
# Resolve the public hostname for one frontend app.
# $1: env key suffix (e.g. PHOTOS_DOMAIN), $2: fallback hostname.
# Checks CLOUDRON_<key>, CLOUDRON_HTTP_PORT_<key>, then the bare <key>
# environment variable; supports the shorthands "@" (the app FQDN itself)
# and "label@" (label prepended to the app FQDN). A single trailing dot
# (DNS-absolute notation) is stripped. For APP_FQDN=localhost everything
# collapses onto localhost.
resolve_http_hostname() {
  local key="$1"
  local fallback="$2"
  local value=""
  local candidate
  for candidate in "CLOUDRON_${key}" "CLOUDRON_HTTP_PORT_${key}" "${key}"; do
    value="$(printenv "$candidate" 2>/dev/null || true)"
    [ -n "$value" ] && break
  done
  if [ -z "$value" ]; then
    if [ "$APP_FQDN" = "localhost" ]; then
      printf '%s\n' "$APP_FQDN"
    else
      printf '%s\n' "$fallback"
    fi
    return
  fi
  value="${value%.}"
  if [ "$value" = "@" ]; then
    # "@" means the apex — the app's own FQDN.
    printf '%s\n' "$APP_FQDN"
  elif [ "${value%@}" != "$value" ]; then
    # Trailing "@": a label relative to the app FQDN.
    printf '%s.%s\n' "${value%@}" "$APP_FQDN"
  elif [ "${value#*.}" != "$value" ]; then
    # Contains a dot: treat as a fully qualified hostname.
    printf '%s\n' "$value"
  elif [ "$APP_FQDN" = "localhost" ]; then
    printf '%s\n' "$APP_FQDN"
  else
    # Bare label: append the app FQDN.
    printf '%s.%s\n' "$value" "$APP_FQDN"
  fi
}
# --- Filesystem layout -------------------------------------------------------
# /app/code is the read-only app image; /app/data is the writable volume.
APP_DIR="/app/code"
DATA_DIR="/app/data"
LOG_DIR="$DATA_DIR/logs"
CONFIG_DIR="$DATA_DIR/config"
TMP_DIR="$DATA_DIR/tmp"
SECRETS_DIR="$DATA_DIR/secrets"
MUSEUM_RUNTIME_DIR="$DATA_DIR/museum"
MUSEUM_CONFIG_DIR="$MUSEUM_RUNTIME_DIR/configurations"
MUSEUM_CONFIG="$MUSEUM_CONFIG_DIR/local.yaml"
MUSEUM_BIN="/app/museum-bin/museum"
WEB_SOURCE_DIR="/app/web"
WEB_RUNTIME_DIR="$DATA_DIR/web"
CADDY_CONFIG="$DATA_DIR/Caddyfile"
STARTUP_FLAG="$DATA_DIR/startup.lock"
mkdir -p "$LOG_DIR" "$CONFIG_DIR" "$TMP_DIR" "$SECRETS_DIR" "$MUSEUM_RUNTIME_DIR" "$WEB_RUNTIME_DIR" "$MUSEUM_CONFIG_DIR"
chown -R cloudron:cloudron "$DATA_DIR"
LOG_FILE="$LOG_DIR/startup.log"
touch "$LOG_FILE"
chown cloudron:cloudron "$LOG_FILE"
chmod 640 "$LOG_FILE"
# Mirror all output to a persistent log file while retaining stdout/stderr for Cloudron aggregation
exec > >(tee -a "$LOG_FILE") 2>&1
log INFO "Starting Ente for Cloudron"
log INFO "Startup logs are mirrored to $LOG_FILE"
# setpriv is required later in the script (beyond this chunk) — presumably to
# drop root privileges when launching services; confirm against the full file.
if ! command -v setpriv >/dev/null 2>&1; then
log ERROR "setpriv command not found"
exit 1
fi
# A leftover lock file means the previous startup crashed mid-way; clear it so
# this run starts from a known state. The trap removes it on any exit path.
if [ -f "$STARTUP_FLAG" ]; then
log WARN "Previous startup did not finish cleanly; removing flag"
rm -f "$STARTUP_FLAG"
fi
touch "$STARTUP_FLAG"
trap 'rm -f "$STARTUP_FLAG"' EXIT
# --- Public hostnames --------------------------------------------------------
# APP_FQDN falls back to "localhost" for local testing.
APP_FQDN="${CLOUDRON_APP_DOMAIN:-${CLOUDRON_APP_FQDN:-localhost}}"
BASE_URL="${CLOUDRON_APP_ORIGIN:-https://$APP_FQDN}"
BASE_URL="${BASE_URL%/}"
# One hostname per frontend app, each overridable via CLOUDRON_*_DOMAIN.
PHOTOS_HOST="$(resolve_http_hostname "PHOTOS_DOMAIN" "$APP_FQDN")"
ACCOUNTS_HOST="$(resolve_http_hostname "ACCOUNTS_DOMAIN" "accounts.${APP_FQDN}")"
AUTH_HOST="$(resolve_http_hostname "AUTH_DOMAIN" "auth.${APP_FQDN}")"
CAST_HOST="$(resolve_http_hostname "CAST_DOMAIN" "cast.${APP_FQDN}")"
ALBUMS_HOST="$(resolve_http_hostname "ALBUMS_DOMAIN" "albums.${APP_FQDN}")"
FAMILY_HOST="$(resolve_http_hostname "FAMILY_DOMAIN" "family.${APP_FQDN}")"
# Ensure a frontend host lives under the parent domain of APP_FQDN.
# E.g. with APP_FQDN=photos.example.com, "auth" -> "auth.example.com", while
# hosts already ending in ".example.com" pass through unchanged.
normalize_host() {
  local host="$1"
  # Bug fix: when APP_FQDN has no dot (e.g. the "localhost" fallback),
  # "${APP_FQDN#*.}" leaves it unchanged, so the suffix became ".localhost"
  # and every host was mangled to "host.localhost". With no parent domain
  # there is nothing to normalize — return the host as-is.
  if [ "${APP_FQDN#*.}" = "$APP_FQDN" ]; then
    printf '%s\n' "$host"
    return
  fi
  local suffix=".${APP_FQDN#*.}"
  case "$host" in
  *"$suffix")
    # Already under the parent domain.
    printf '%s\n' "$host"
    ;;
  *)
    printf '%s%s\n' "$host" "$suffix"
    ;;
  esac
}
# Force every auxiliary host under the parent domain of APP_FQDN.
ACCOUNTS_HOST="$(normalize_host "$ACCOUNTS_HOST")"
AUTH_HOST="$(normalize_host "$AUTH_HOST")"
CAST_HOST="$(normalize_host "$CAST_HOST")"
ALBUMS_HOST="$(normalize_host "$ALBUMS_HOST")"
FAMILY_HOST="$(normalize_host "$FAMILY_HOST")"
# Subdomain routing is enabled when any frontend resolves to a host other
# than the app's own FQDN; localhost always uses path-based routing.
USE_SUBDOMAIN_ROUTING=false
if [ "$APP_FQDN" != "localhost" ]; then
if [ "$PHOTOS_HOST" != "$APP_FQDN" ] || [ "$ACCOUNTS_HOST" != "$APP_FQDN" ] || [ "$AUTH_HOST" != "$APP_FQDN" ] || [ "$CAST_HOST" != "$APP_FQDN" ] || [ "$ALBUMS_HOST" != "$APP_FQDN" ] || [ "$FAMILY_HOST" != "$APP_FQDN" ]; then
USE_SUBDOMAIN_ROUTING=true
fi
fi
PHOTOS_URL="https://${PHOTOS_HOST}"
if [ "$USE_SUBDOMAIN_ROUTING" = true ]; then
# One subdomain per app.
ACCOUNTS_URL="https://${ACCOUNTS_HOST}"
AUTH_URL="https://${AUTH_HOST}"
CAST_URL="https://${CAST_HOST}"
FAMILY_URL="https://${FAMILY_HOST}"
ALBUMS_URL="https://${ALBUMS_HOST}"
else
# Path-based routing: all apps hang off the single Cloudron origin.
ACCOUNTS_URL="${BASE_URL}/accounts"
AUTH_URL="${BASE_URL}/auth"
CAST_URL="${BASE_URL}/cast"
FAMILY_URL="${BASE_URL}/family"
ALBUMS_URL="${BASE_URL}/albums"
fi
# Museum (the API server) is reached under /api on the photos host.
if [ "$APP_FQDN" != "localhost" ]; then
API_BASE="https://${PHOTOS_HOST}"
else
API_BASE="$BASE_URL"
fi
API_ORIGIN="${API_BASE}/api"
# WebAuthn relying-party ID must match the host serving the photos app.
RP_ID="$PHOTOS_HOST"
log INFO "Application base URL: $BASE_URL"
log INFO "Relying party ID: $RP_ID"
log INFO "API origin: $API_ORIGIN"
if [ "$USE_SUBDOMAIN_ROUTING" = true ]; then
log INFO "Serving frontend hosts: photos=${PHOTOS_HOST}, accounts=${ACCOUNTS_HOST}, auth=${AUTH_HOST}, cast=${CAST_HOST}"
fi
# Per-installation S3 settings live in a sourceable env file on the data volume.
S3_CONFIG_FILE="$CONFIG_DIR/s3.env"
if [ ! -f "$S3_CONFIG_FILE" ]; then
# First run: drop a fully-commented template for the admin to fill in.
cat > "$S3_CONFIG_FILE" <<'EOF_S3'
# S3 configuration for Ente (required)
# Provide credentials for an S3-compatible object storage and restart the app.
#
# Supported environment variables (either set here or via Cloudron env vars):
# S3_ENDPOINT=https://example.s3-provider.com
# S3_REGION=us-east-1
# S3_BUCKET=ente-data
# S3_ACCESS_KEY=your-access-key
# S3_SECRET_KEY=your-secret-key
# S3_PREFIX=optional/path/prefix
# Optional replication settings (secondary object storage):
# S3_SECONDARY_ENDPOINT=https://secondary.s3-provider.com
# S3_SECONDARY_REGION=us-west-1
# S3_SECONDARY_BUCKET=ente-data-backup
# S3_SECONDARY_ACCESS_KEY=secondary-access-key
# S3_SECONDARY_SECRET_KEY=secondary-secret-key
# S3_SECONDARY_PREFIX=optional/path/prefix
# S3_SECONDARY_DC=b2-us-west
# S3_COLD_ENDPOINT=https://cold.s3-provider.com
# S3_COLD_REGION=eu-central-1
# S3_COLD_BUCKET=ente-cold
# S3_COLD_ACCESS_KEY=cold-access-key
# S3_COLD_SECRET_KEY=cold-secret-key
# S3_COLD_PREFIX=optional/path/prefix
# S3_COLD_DC=scw-eu-fr-v3
# Replication requires configuring both the secondary hot storage and the cold
# storage buckets. Leave these unset to run with a single bucket. (Derived storage
# is optional and defaults to the primary bucket.)
#
#
# Example layout (replication):
#
# Primary hot bucket (Backblaze B2):
#S3_ENDPOINT=https://s3.us-west-002.backblazeb2.com
#S3_REGION=us-west-002
#S3_BUCKET=ente-due-ren
#S3_ACCESS_KEY=<B2_PRIMARY_ACCESS_KEY>
#S3_SECRET_KEY=<B2_PRIMARY_SECRET_KEY>
#S3_FORCE_PATH_STYLE=true
#S3_PRIMARY_DC=b2-eu-cen
#
# Secondary hot bucket (Hetzner Object Storage, hel1):
#S3_SECONDARY_ENDPOINT=https://hel1.your-objectstorage.com
#S3_SECONDARY_REGION=hel1
#S3_SECONDARY_BUCKET=ente-secondary
#S3_SECONDARY_ACCESS_KEY=<HETZNER_ACCESS_KEY>
#S3_SECONDARY_SECRET_KEY=<HETZNER_SECRET_KEY>
#S3_SECONDARY_FORCE_PATH_STYLE=true
#S3_SECONDARY_DC=wasabi-eu-central-2-v3
#
# Cold bucket (Cloudflare R2):
#S3_COLD_ENDPOINT=https://<account-id>.r2.cloudflarestorage.com
#S3_COLD_REGION=auto
#S3_COLD_BUCKET=ente-cold
#S3_COLD_ACCESS_KEY=<R2_ACCESS_KEY>
#S3_COLD_SECRET_KEY=<R2_SECRET_KEY>
#S3_COLD_FORCE_PATH_STYLE=true
#S3_COLD_DC=scw-eu-fr-v3
#
# When all three blocks are configured, replication is enabled automatically.
EOF_S3
chown cloudron:cloudron "$S3_CONFIG_FILE"
chmod 600 "$S3_CONFIG_FILE"
log INFO "Created S3 configuration template at $S3_CONFIG_FILE"
fi
# Source the admin-provided values; -u is relaxed because the file may
# legitimately reference variables that are still unset.
set +u
if [ -f "$S3_CONFIG_FILE" ]; then
# shellcheck disable=SC1090
. "$S3_CONFIG_FILE"
fi
set -u
# Split an S3 endpoint URL into host and optional path prefix.
# $1: raw URL, $2: existing prefix (kept when non-empty),
# $3/$4: names of the variables that receive the host and the prefix.
parse_s3_endpoint() {
  local raw="$1"
  local prefix="$2"
  local host_out="$3"
  local prefix_out="$4"
  local stripped="${raw#https://}"
  stripped="${stripped#http://}"
  stripped="${stripped%%/}"
  local path_part="${stripped#*/}"
  if [ "$path_part" != "$stripped" ]; then
    # The URL carried a path component; adopt it as the prefix unless the
    # caller already supplied one, then keep only the host portion.
    [ -n "$prefix" ] || prefix="$path_part"
    stripped="${stripped%%/*}"
  fi
  printf -v "$host_out" '%s' "$stripped"
  printf -v "$prefix_out" '%s' "$prefix"
}
# Every S3 setting accepts either a plain name or an ENTE_-prefixed variant.
# --- Primary bucket (mandatory) ---
S3_ENDPOINT="${S3_ENDPOINT:-${ENTE_S3_ENDPOINT:-}}"
S3_REGION="${S3_REGION:-${ENTE_S3_REGION:-}}"
S3_BUCKET="${S3_BUCKET:-${ENTE_S3_BUCKET:-}}"
S3_ACCESS_KEY="${S3_ACCESS_KEY:-${ENTE_S3_ACCESS_KEY:-}}"
S3_SECRET_KEY="${S3_SECRET_KEY:-${ENTE_S3_SECRET_KEY:-}}"
S3_PREFIX="${S3_PREFIX:-${ENTE_S3_PREFIX:-}}"
# --- Optional secondary (hot replication) bucket ---
S3_SECONDARY_ENDPOINT="${S3_SECONDARY_ENDPOINT:-${ENTE_S3_SECONDARY_ENDPOINT:-}}"
S3_SECONDARY_REGION="${S3_SECONDARY_REGION:-${ENTE_S3_SECONDARY_REGION:-}}"
S3_SECONDARY_BUCKET="${S3_SECONDARY_BUCKET:-${ENTE_S3_SECONDARY_BUCKET:-}}"
S3_SECONDARY_ACCESS_KEY="${S3_SECONDARY_ACCESS_KEY:-${ENTE_S3_SECONDARY_ACCESS_KEY:-}}"
S3_SECONDARY_SECRET_KEY="${S3_SECONDARY_SECRET_KEY:-${ENTE_S3_SECONDARY_SECRET_KEY:-}}"
S3_SECONDARY_PREFIX="${S3_SECONDARY_PREFIX:-${ENTE_S3_SECONDARY_PREFIX:-}}"
S3_SECONDARY_ENABLED=false
S3_SECONDARY_ENDPOINT_HOST=""
# --- Optional cold-storage bucket ---
S3_COLD_ENDPOINT="${S3_COLD_ENDPOINT:-${ENTE_S3_COLD_ENDPOINT:-}}"
S3_COLD_REGION="${S3_COLD_REGION:-${ENTE_S3_COLD_REGION:-}}"
S3_COLD_BUCKET="${S3_COLD_BUCKET:-${ENTE_S3_COLD_BUCKET:-}}"
S3_COLD_ACCESS_KEY="${S3_COLD_ACCESS_KEY:-${ENTE_S3_COLD_ACCESS_KEY:-}}"
S3_COLD_SECRET_KEY="${S3_COLD_SECRET_KEY:-${ENTE_S3_COLD_SECRET_KEY:-}}"
S3_COLD_PREFIX="${S3_COLD_PREFIX:-${ENTE_S3_COLD_PREFIX:-}}"
S3_COLD_ENABLED=false
S3_COLD_ENDPOINT_HOST=""
# --- Optional derived-data bucket (defaults to the primary) ---
S3_DERIVED_ENDPOINT="${S3_DERIVED_ENDPOINT:-${ENTE_S3_DERIVED_ENDPOINT:-}}"
S3_DERIVED_REGION="${S3_DERIVED_REGION:-${ENTE_S3_DERIVED_REGION:-}}"
S3_DERIVED_BUCKET="${S3_DERIVED_BUCKET:-${ENTE_S3_DERIVED_BUCKET:-}}"
S3_DERIVED_ACCESS_KEY="${S3_DERIVED_ACCESS_KEY:-${ENTE_S3_DERIVED_ACCESS_KEY:-}}"
S3_DERIVED_SECRET_KEY="${S3_DERIVED_SECRET_KEY:-${ENTE_S3_DERIVED_SECRET_KEY:-}}"
S3_DERIVED_PREFIX="${S3_DERIVED_PREFIX:-${ENTE_S3_DERIVED_PREFIX:-}}"
S3_DERIVED_CUSTOM=false
S3_DERIVED_ENDPOINT_HOST=""
# The five primary settings are mandatory; without them the app starts in
# "configuration mode" with placeholder values instead of aborting.
if [ -z "$S3_ENDPOINT" ] || [ -z "$S3_REGION" ] || [ -z "$S3_BUCKET" ] || [ -z "$S3_ACCESS_KEY" ] || [ -z "$S3_SECRET_KEY" ]; then
log ERROR "Missing S3 configuration. Update $S3_CONFIG_FILE or set environment variables."
log ERROR "The application will start in configuration mode. Please configure S3 and restart."
S3_NOT_CONFIGURED=true
else
S3_NOT_CONFIGURED=false
fi
if [ "$S3_NOT_CONFIGURED" = "false" ]; then
parse_s3_endpoint "$S3_ENDPOINT" "$S3_PREFIX" S3_ENDPOINT_HOST S3_PREFIX
parse_s3_endpoint "$S3_DERIVED_ENDPOINT" "$S3_DERIVED_PREFIX" S3_DERIVED_ENDPOINT_HOST S3_DERIVED_PREFIX
S3_REGION_LOWER="$(printf '%s' "$S3_REGION" | tr '[:upper:]' '[:lower:]')"
# Cloudflare R2 endpoints are expected to use region "auto"; warn otherwise.
if printf '%s' "$S3_ENDPOINT_HOST" | grep -q '\.r2\.cloudflarestorage\.com$' && [ "$S3_REGION_LOWER" != "auto" ]; then
log WARN "Cloudflare R2 endpoints require S3_REGION=auto; current value '$S3_REGION' may cause upload failures"
fi
else
S3_ENDPOINT_HOST="s3.example.com"
S3_DERIVED_ENDPOINT_HOST="$S3_ENDPOINT_HOST"
log WARN "S3 not configured - using placeholder values"
fi
# Ensure AWS SDK always has a region when Museum needs to presign URLs (e.g. replication)
if [ "$S3_NOT_CONFIGURED" = "false" ]; then
if [ -n "$S3_REGION" ] && [ -z "${AWS_REGION:-}" ]; then
export AWS_REGION="$S3_REGION"
fi
if [ -n "${AWS_REGION:-}" ] && [ -z "${AWS_DEFAULT_REGION:-}" ]; then
export AWS_DEFAULT_REGION="$AWS_REGION"
fi
fi
# Path-style URLs are the default. NOTE(review): the R2 detection below only
# logs — the default is already "true", so no value is actually changed here.
DEFAULT_FORCE_PATH_STYLE="true"
if printf '%s' "$S3_ENDPOINT_HOST" | grep -q '\.r2\.cloudflarestorage\.com$'; then
if [ -z "${S3_FORCE_PATH_STYLE:-}" ] && [ -z "${ENTE_S3_FORCE_PATH_STYLE:-}" ]; then
log INFO "Detected Cloudflare R2 endpoint; defaulting to path-style URLs (required by R2)"
fi
fi
S3_FORCE_PATH_STYLE_RAW="${S3_FORCE_PATH_STYLE:-${ENTE_S3_FORCE_PATH_STYLE:-$DEFAULT_FORCE_PATH_STYLE}}"
S3_FORCE_PATH_STYLE="$(printf '%s' "$S3_FORCE_PATH_STYLE_RAW" | tr '[:upper:]' '[:lower:]')"
S3_ARE_LOCAL_BUCKETS="$(printf '%s' "${S3_ARE_LOCAL_BUCKETS:-${ENTE_S3_ARE_LOCAL_BUCKETS:-false}}" | tr '[:upper:]' '[:lower:]')"
DEFAULT_SECONDARY_DC="wasabi-eu-central-2-v3"
DEFAULT_COLD_DC="scw-eu-fr-v3"
# Data-center labels accepted by validate_s3_dc — presumably the set Museum
# itself recognises; confirm against the server's bucket configuration.
S3_VALID_DC_NAMES=("b2-eu-cen" "scw-eu-fr" "scw-eu-fr-locked" "scw-eu-fr-v3" "wasabi-eu-central-2" "wasabi-eu-central-2-v3" "wasabi-eu-central-2-derived" "b5" "b6")
# Validate a configured S3 data-center name against S3_VALID_DC_NAMES.
# $1: candidate, $2: fallback, $3: label used in the warning message.
# Prints the accepted DC name on stdout; all call sites capture it with $(...).
validate_s3_dc() {
  local candidate="$1"
  local fallback="$2"
  local label="$3"
  if [ -z "$candidate" ]; then
    printf '%s\n' "$fallback"
    return
  fi
  local allowed
  for allowed in "${S3_VALID_DC_NAMES[@]}"; do
    if [ "$candidate" = "$allowed" ]; then
      printf '%s\n' "$candidate"
      return
    fi
  done
  # Bug fix: log() writes to stdout, and callers capture this function's
  # stdout with $(...) — so the WARN line used to be captured into the DC
  # variable along with the fallback. Route the warning to stderr instead.
  log WARN "Ignoring unknown $label S3 data center '$candidate'; falling back to $fallback" >&2
  printf '%s\n' "$fallback"
}
# Validate / default the data-center labels used to address each bucket tier.
S3_PRIMARY_DC="$(validate_s3_dc "${S3_PRIMARY_DC:-${ENTE_S3_PRIMARY_DC:-}}" "b2-eu-cen" "primary")"
S3_SECONDARY_DC="$(validate_s3_dc "${S3_SECONDARY_DC:-${ENTE_S3_SECONDARY_DC:-}}" "$DEFAULT_SECONDARY_DC" "secondary")"
S3_COLD_DC="$(validate_s3_dc "${S3_COLD_DC:-${ENTE_S3_COLD_DC:-}}" "$DEFAULT_COLD_DC" "cold")"
S3_DERIVED_DC="$(validate_s3_dc "${S3_DERIVED_DC:-${ENTE_S3_DERIVED_DC:-}}" "$S3_PRIMARY_DC" "derived")"
# --- Secondary bucket: enabled only when endpoint and both keys are present.
S3_SECONDARY_ENV_PRESENT=false
for value in "$S3_SECONDARY_ENDPOINT" "$S3_SECONDARY_REGION" "$S3_SECONDARY_BUCKET" "$S3_SECONDARY_ACCESS_KEY" "$S3_SECONDARY_SECRET_KEY" "$S3_SECONDARY_PREFIX"; do
if [ -n "$value" ]; then
S3_SECONDARY_ENV_PRESENT=true
break
fi
done
if [ "$S3_NOT_CONFIGURED" = "false" ] && [ "$S3_SECONDARY_ENV_PRESENT" = true ]; then
# Region/bucket/prefix inherit from the primary when left unset.
S3_SECONDARY_REGION="${S3_SECONDARY_REGION:-$S3_REGION}"
S3_SECONDARY_BUCKET="${S3_SECONDARY_BUCKET:-$S3_BUCKET}"
S3_SECONDARY_PREFIX="${S3_SECONDARY_PREFIX:-$S3_PREFIX}"
MISSING_SECONDARY_VARS=()
[ -z "$S3_SECONDARY_ENDPOINT" ] && MISSING_SECONDARY_VARS+=("S3_SECONDARY_ENDPOINT")
[ -z "$S3_SECONDARY_ACCESS_KEY" ] && MISSING_SECONDARY_VARS+=("S3_SECONDARY_ACCESS_KEY")
[ -z "$S3_SECONDARY_SECRET_KEY" ] && MISSING_SECONDARY_VARS+=("S3_SECONDARY_SECRET_KEY")
if [ "${#MISSING_SECONDARY_VARS[@]}" -gt 0 ]; then
log ERROR "Secondary S3 configuration incomplete (missing: ${MISSING_SECONDARY_VARS[*]}). Replication disabled."
S3_SECONDARY_ENABLED=false
S3_SECONDARY_DC=""
else
S3_SECONDARY_ENABLED=true
fi
else
S3_SECONDARY_ENABLED=false
S3_SECONDARY_DC=""
fi
# --- Cold bucket: same pattern as the secondary bucket.
S3_COLD_ENV_PRESENT=false
for value in "$S3_COLD_ENDPOINT" "$S3_COLD_REGION" "$S3_COLD_BUCKET" "$S3_COLD_ACCESS_KEY" "$S3_COLD_SECRET_KEY" "$S3_COLD_PREFIX"; do
if [ -n "$value" ]; then
S3_COLD_ENV_PRESENT=true
break
fi
done
if [ "$S3_NOT_CONFIGURED" = "false" ] && [ "$S3_COLD_ENV_PRESENT" = true ]; then
S3_COLD_REGION="${S3_COLD_REGION:-$S3_REGION}"
S3_COLD_BUCKET="${S3_COLD_BUCKET:-$S3_BUCKET}"
S3_COLD_PREFIX="${S3_COLD_PREFIX:-$S3_PREFIX}"
MISSING_COLD_VARS=()
[ -z "$S3_COLD_ENDPOINT" ] && MISSING_COLD_VARS+=("S3_COLD_ENDPOINT")
[ -z "$S3_COLD_ACCESS_KEY" ] && MISSING_COLD_VARS+=("S3_COLD_ACCESS_KEY")
[ -z "$S3_COLD_SECRET_KEY" ] && MISSING_COLD_VARS+=("S3_COLD_SECRET_KEY")
if [ "${#MISSING_COLD_VARS[@]}" -gt 0 ]; then
log ERROR "Cold storage configuration incomplete (missing: ${MISSING_COLD_VARS[*]}). Replication disabled."
S3_COLD_ENABLED=false
S3_COLD_DC=""
else
S3_COLD_ENABLED=true
fi
else
S3_COLD_ENABLED=false
S3_COLD_DC=""
fi
# --- Derived bucket: incomplete or absent config falls back to the primary.
S3_DERIVED_ENV_PRESENT=false
for value in "$S3_DERIVED_ENDPOINT" "$S3_DERIVED_REGION" "$S3_DERIVED_BUCKET" "$S3_DERIVED_ACCESS_KEY" "$S3_DERIVED_SECRET_KEY" "$S3_DERIVED_PREFIX"; do
if [ -n "$value" ]; then
S3_DERIVED_ENV_PRESENT=true
break
fi
done
if [ "$S3_NOT_CONFIGURED" = "false" ]; then
if [ "$S3_DERIVED_ENV_PRESENT" = true ]; then
S3_DERIVED_REGION="${S3_DERIVED_REGION:-$S3_REGION}"
S3_DERIVED_BUCKET="${S3_DERIVED_BUCKET:-$S3_BUCKET}"
S3_DERIVED_PREFIX="${S3_DERIVED_PREFIX:-$S3_PREFIX}"
MISSING_DERIVED_VARS=()
[ -z "$S3_DERIVED_ENDPOINT" ] && MISSING_DERIVED_VARS+=("S3_DERIVED_ENDPOINT")
[ -z "$S3_DERIVED_ACCESS_KEY" ] && MISSING_DERIVED_VARS+=("S3_DERIVED_ACCESS_KEY")
[ -z "$S3_DERIVED_SECRET_KEY" ] && MISSING_DERIVED_VARS+=("S3_DERIVED_SECRET_KEY")
if [ "${#MISSING_DERIVED_VARS[@]}" -gt 0 ]; then
log ERROR "Derived S3 configuration incomplete (missing: ${MISSING_DERIVED_VARS[*]}). Falling back to primary bucket for derived assets."
S3_DERIVED_CUSTOM=false
S3_DERIVED_ENDPOINT="$S3_ENDPOINT"
S3_DERIVED_REGION="$S3_REGION"
S3_DERIVED_BUCKET="$S3_BUCKET"
S3_DERIVED_ACCESS_KEY="$S3_ACCESS_KEY"
S3_DERIVED_SECRET_KEY="$S3_SECRET_KEY"
S3_DERIVED_PREFIX="$S3_PREFIX"
else
S3_DERIVED_CUSTOM=true
fi
else
S3_DERIVED_CUSTOM=false
S3_DERIVED_ENDPOINT="$S3_ENDPOINT"
S3_DERIVED_REGION="$S3_REGION"
S3_DERIVED_BUCKET="$S3_BUCKET"
S3_DERIVED_ACCESS_KEY="$S3_ACCESS_KEY"
S3_DERIVED_SECRET_KEY="$S3_SECRET_KEY"
S3_DERIVED_PREFIX="$S3_PREFIX"
fi
else
S3_DERIVED_CUSTOM=false
fi
# Split the enabled tiers' endpoint URLs into host + prefix.
if [ "$S3_NOT_CONFIGURED" = "false" ] && [ "$S3_SECONDARY_ENABLED" = true ]; then
parse_s3_endpoint "$S3_SECONDARY_ENDPOINT" "$S3_SECONDARY_PREFIX" S3_SECONDARY_ENDPOINT_HOST S3_SECONDARY_PREFIX
else
S3_SECONDARY_ENDPOINT_HOST=""
fi
if [ "$S3_NOT_CONFIGURED" = "false" ] && [ "$S3_COLD_ENABLED" = true ]; then
parse_s3_endpoint "$S3_COLD_ENDPOINT" "$S3_COLD_PREFIX" S3_COLD_ENDPOINT_HOST S3_COLD_PREFIX
else
S3_COLD_ENDPOINT_HOST=""
fi
# Replication needs BOTH the secondary hot bucket and the cold bucket.
S3_REPLICATION_ENABLED=false
if [ "$S3_SECONDARY_ENABLED" = true ] && [ "$S3_COLD_ENABLED" = true ]; then
S3_REPLICATION_ENABLED=true
elif [ "$S3_SECONDARY_ENABLED" = true ] && [ "$S3_COLD_ENABLED" = false ]; then
log WARN "Secondary hot bucket configured without a cold storage bucket; S3 replication remains disabled."
elif [ "$S3_SECONDARY_ENABLED" = false ] && [ "$S3_COLD_ENABLED" = true ]; then
log WARN "Cold storage bucket configured without a secondary hot bucket; S3 replication remains disabled."
fi
# Ordered, de-duplicated list of S3 data centers to render into the config.
S3_DCS=()
# add_s3_dc NAME — append NAME to S3_DCS unless it is empty or already listed.
add_s3_dc() {
  local new="$1"
  local seen
  [ -n "$new" ] || return 0
  for seen in "${S3_DCS[@]}"; do
    if [ "$seen" = "$new" ]; then
      return 0
    fi
  done
  S3_DCS+=("$new")
}
# Register every data center that must appear in the config, primary first.
add_s3_dc "$S3_PRIMARY_DC"
if [ "$S3_SECONDARY_ENABLED" = true ]; then
add_s3_dc "$S3_SECONDARY_DC"
fi
if [ "$S3_COLD_ENABLED" = true ]; then
add_s3_dc "$S3_COLD_DC"
fi
add_s3_dc "$S3_DERIVED_DC"
# write_s3_dc_block DC KEY SECRET ENDPOINT-HOST REGION BUCKET PREFIX
# Append one data-center credentials block to $MUSEUM_CONFIG. The optional
# path_prefix line is emitted only when a prefix is set; a blank line
# terminates the block.
write_s3_dc_block() {
local dc="$1"
local key="$2"
local secret="$3"
local endpoint="$4"
local region="$5"
local bucket="$6"
local prefix="$7"
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
$dc:
key: "$key"
secret: "$secret"
endpoint: "$endpoint"
region: "$region"
bucket: "$bucket"
EOF_CFG
if [ -n "$prefix" ]; then
printf ' path_prefix: "%s"\n' "$prefix" >> "$MUSEUM_CONFIG"
fi
printf '\n' >> "$MUSEUM_CONFIG"
}
# Summarise the resolved storage layout in the startup log.
S3_PREFIX_DISPLAY="${S3_PREFIX:-<none>}"
log INFO "Resolved S3 configuration: host=$S3_ENDPOINT_HOST region=$S3_REGION pathStyle=$S3_FORCE_PATH_STYLE localBuckets=$S3_ARE_LOCAL_BUCKETS primaryDC=$S3_PRIMARY_DC derivedDC=$S3_DERIVED_DC prefix=$S3_PREFIX_DISPLAY"
if [ "$S3_SECONDARY_ENABLED" = true ]; then
S3_SECONDARY_PREFIX_DISPLAY="${S3_SECONDARY_PREFIX:-<none>}"
log INFO "Secondary replication target: host=$S3_SECONDARY_ENDPOINT_HOST region=$S3_SECONDARY_REGION dc=$S3_SECONDARY_DC prefix=$S3_SECONDARY_PREFIX_DISPLAY"
else
log INFO "Secondary hot-storage bucket not configured; replication disabled."
fi
if [ "$S3_COLD_ENABLED" = true ]; then
S3_COLD_PREFIX_DISPLAY="${S3_COLD_PREFIX:-<none>}"
log INFO "Cold storage target: host=$S3_COLD_ENDPOINT_HOST region=$S3_COLD_REGION dc=$S3_COLD_DC prefix=$S3_COLD_PREFIX_DISPLAY"
else
log INFO "Cold storage bucket not configured."
fi
if [ "$S3_DERIVED_CUSTOM" = true ]; then
S3_DERIVED_PREFIX_DISPLAY="${S3_DERIVED_PREFIX:-<none>}"
log INFO "Derived storage target: host=$S3_DERIVED_ENDPOINT_HOST region=$S3_DERIVED_REGION dc=$S3_DERIVED_DC prefix=$S3_DERIVED_PREFIX_DISPLAY"
else
log INFO "Derived storage reuses the primary bucket."
fi
# Trusted proxy ranges so Gin honours X-Forwarded-* headers from the local
# reverse proxy / Cloudron network.
DEFAULT_GIN_TRUSTED_PROXIES="127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
GIN_TRUSTED_PROXIES="${GIN_TRUSTED_PROXIES:-$DEFAULT_GIN_TRUSTED_PROXIES}"
export GIN_TRUSTED_PROXIES
log INFO "Configured trusted proxy ranges for Museum: $GIN_TRUSTED_PROXIES"
# Long-lived secrets, created below by ensure_secret if absent.
MASTER_KEY_FILE="$SECRETS_DIR/master_key"
HASH_KEY_FILE="$SECRETS_DIR/hash_key"
JWT_SECRET_FILE="$SECRETS_DIR/jwt_secret"
SESSION_SECRET_FILE="$SECRETS_DIR/session_secret"
# SMTP settings from the Cloudron mail addon; defaults target the "mail" host.
SMTP_HOST="${CLOUDRON_MAIL_SMTP_SERVER:-mail}"
SMTP_PORT="${CLOUDRON_MAIL_SMTPS_PORT:-2465}"
SMTP_ENCRYPTION="tls"
SMTP_USERNAME="${CLOUDRON_MAIL_SMTP_USERNAME:-}"
SMTP_PASSWORD="${CLOUDRON_MAIL_SMTP_PASSWORD:-}"
SMTP_EMAIL="${CLOUDRON_MAIL_FROM:-no-reply@$RP_ID}"
SMTP_SENDER_NAME="${CLOUDRON_MAIL_FROM_DISPLAY_NAME:-Ente}"
if [ -n "$SMTP_HOST" ]; then
log INFO "SMTP configured for $SMTP_HOST:$SMTP_PORT (encryption: ${SMTP_ENCRYPTION:-none})"
else
# NOTE(review): SMTP_HOST defaults to "mail" above, so this branch is
# unreachable as written — confirm whether an empty override is intended.
log INFO "SMTP not configured; Museum will skip outbound email"
fi
# Canonicalise a base64 value: strip CR/LF, convert the base64url alphabet
# ("-_") to standard ("+/"), and restore '=' padding. A length of 1 mod 4 is
# impossible for base64, so such input yields the empty string.
normalize_b64() {
  local v
  v="$(printf '%s' "$1" | tr -d '\r\n' | tr -- '-_' '+/')"
  case $(( ${#v} % 4 )) in
    2) v="${v}==" ;;
    3) v="${v}=" ;;
    1) v="" ;;
  esac
  printf '%s' "$v"
}
# Canonicalise a base64url value: strip CR/LF, convert the standard alphabet
# ("+/") to base64url ("-_"), and restore '=' padding. A length of 1 mod 4 is
# impossible, so such input yields the empty string.
normalize_b64url() {
  local v
  v="$(printf '%s' "$1" | tr -d '\r\n' | tr -- '+/' '-_')"
  case $(( ${#v} % 4 )) in
    2) v="${v}==" ;;
    3) v="${v}=" ;;
    1) v="" ;;
  esac
  printf '%s' "$v"
}
# Emit $1 random bytes as a single-line standard-base64 string.
generate_b64() {
  openssl rand -base64 "$1" | tr -d '\n'
}
# Emit $1 random bytes as a single-line base64url-alphabet string
# (standard base64 with '+' -> '-' and '/' -> '_'; '=' padding is kept).
generate_b64url() {
  openssl rand -base64 "$1" | tr -d '\n' | tr -- '+/' '-_'
}
# Ensure FILE holds a usable secret: normalise any existing value, or
# generate a fresh one when the file is missing/empty/invalid.
# $1: file path, $2: random byte count, $3: encoding ("b64" or base64url).
ensure_secret() {
  local file="$1"
  local bytes="$2"
  local mode="$3"
  local secret=""
  if [ -f "$file" ]; then
    secret="$(tr -d '\n' < "$file")"
  fi
  if [ "$mode" = "b64" ]; then
    secret="$(normalize_b64 "$secret")"
    if [ -z "$secret" ]; then
      secret="$(generate_b64 "$bytes")"
    fi
  else
    secret="$(normalize_b64url "$secret")"
    if [ -z "$secret" ]; then
      secret="$(generate_b64url "$bytes")"
    fi
  fi
  printf '%s\n' "$secret" > "$file"
}
# Create or repair each secret on disk, then load them for config rendering.
ensure_secret "$MASTER_KEY_FILE" 32 b64
ensure_secret "$HASH_KEY_FILE" 64 b64
ensure_secret "$JWT_SECRET_FILE" 32 b64url
ensure_secret "$SESSION_SECRET_FILE" 32 b64url
MASTER_KEY="$(tr -d '\n' < "$MASTER_KEY_FILE")"
HASH_KEY="$(tr -d '\n' < "$HASH_KEY_FILE")"
JWT_SECRET="$(tr -d '\n' < "$JWT_SECRET_FILE")"
SESSION_SECRET="$(tr -d '\n' < "$SESSION_SECRET_FILE")"
# Secret files are owned by the runtime user and readable only by it.
chown cloudron:cloudron "$MASTER_KEY_FILE" "$HASH_KEY_FILE" "$JWT_SECRET_FILE" "$SESSION_SECRET_FILE"
chmod 600 "$MASTER_KEY_FILE" "$HASH_KEY_FILE" "$JWT_SECRET_FILE" "$SESSION_SECRET_FILE"
log INFO "Ensuring Museum runtime assets"
# Replace a runtime copy of a bundled directory with a fresh one.
# $1: source dir (read-only image), $2: target dir (writable data volume).
# Logs a warning and does nothing when the source is absent.
sync_dir() {
  local src="$1"
  local dst="$2"
  if [ ! -d "$src" ]; then
    log WARN "Missing expected directory: $src"
    return
  fi
  log INFO "Syncing $(basename "$src") into data directory"
  rm -rf "$dst"
  cp -a "$src" "$dst"
  chown -R cloudron:cloudron "$dst"
}
# Copy server runtime assets (migrations, templates, data) into the data dir.
sync_dir "$APP_DIR/server/migrations" "$MUSEUM_RUNTIME_DIR/migrations"
sync_dir "$APP_DIR/server/web-templates" "$MUSEUM_RUNTIME_DIR/web-templates"
sync_dir "$APP_DIR/server/mail-templates" "$MUSEUM_RUNTIME_DIR/mail-templates"
sync_dir "$APP_DIR/server/assets" "$MUSEUM_RUNTIME_DIR/assets"
sync_dir "$APP_DIR/data" "$MUSEUM_RUNTIME_DIR/data"
if [ ! -x "$MUSEUM_BIN" ]; then
log ERROR "Museum binary not found at $MUSEUM_BIN"
exit 1
fi
# Always regenerate Museum config to pick up S3 changes
log INFO "Rendering Museum configuration"
# Optional "secondary:" line under s3.hot_storage (left empty when disabled).
HOT_STORAGE_SECONDARY_LINE=""
if [ "$S3_SECONDARY_ENABLED" = true ]; then
HOT_STORAGE_SECONDARY_LINE=" secondary: ${S3_SECONDARY_DC}"
fi
# Render the Museum (API server) configuration from scratch on every boot so
# S3 / hostname changes take effect without manual edits.
cat > "$MUSEUM_CONFIG" <<EOF_CFG
log-file: ""
http:
port: 8080
use-tls: false
apps:
public-albums: "$ALBUMS_URL"
public-locker: "$PHOTOS_URL"
accounts: "$ACCOUNTS_URL"
cast: "$CAST_URL"
family: "$FAMILY_URL"
custom-domain:
cname: "${APP_FQDN}"
db:
host: ${CLOUDRON_POSTGRESQL_HOST}
port: ${CLOUDRON_POSTGRESQL_PORT}
name: ${CLOUDRON_POSTGRESQL_DATABASE}
user: ${CLOUDRON_POSTGRESQL_USERNAME}
password: ${CLOUDRON_POSTGRESQL_PASSWORD}
sslmode: disable
s3:
are_local_buckets: ${S3_ARE_LOCAL_BUCKETS}
use_path_style_urls: ${S3_FORCE_PATH_STYLE}
hot_storage:
primary: ${S3_PRIMARY_DC}
${HOT_STORAGE_SECONDARY_LINE}
derived-storage: ${S3_DERIVED_DC}
EOF_CFG
# One credentials block per unique data center, in registration order.
for dc in "${S3_DCS[@]}"; do
if [ "$dc" = "$S3_PRIMARY_DC" ]; then
write_s3_dc_block "$dc" "$S3_ACCESS_KEY" "$S3_SECRET_KEY" "$S3_ENDPOINT_HOST" "$S3_REGION" "$S3_BUCKET" "$S3_PREFIX"
elif [ "$S3_SECONDARY_ENABLED" = true ] && [ "$dc" = "$S3_SECONDARY_DC" ]; then
write_s3_dc_block "$dc" "$S3_SECONDARY_ACCESS_KEY" "$S3_SECONDARY_SECRET_KEY" "$S3_SECONDARY_ENDPOINT_HOST" "$S3_SECONDARY_REGION" "$S3_SECONDARY_BUCKET" "$S3_SECONDARY_PREFIX"
elif [ "$S3_COLD_ENABLED" = true ] && [ "$dc" = "$S3_COLD_DC" ]; then
write_s3_dc_block "$dc" "$S3_COLD_ACCESS_KEY" "$S3_COLD_SECRET_KEY" "$S3_COLD_ENDPOINT_HOST" "$S3_COLD_REGION" "$S3_COLD_BUCKET" "$S3_COLD_PREFIX"
elif [ "$dc" = "$S3_DERIVED_DC" ]; then
# Derived storage either has its own credentials or reuses the primary's.
if [ "$S3_DERIVED_CUSTOM" = true ]; then
write_s3_dc_block "$dc" "$S3_DERIVED_ACCESS_KEY" "$S3_DERIVED_SECRET_KEY" "$S3_DERIVED_ENDPOINT_HOST" "$S3_DERIVED_REGION" "$S3_DERIVED_BUCKET" "$S3_DERIVED_PREFIX"
else
write_s3_dc_block "$dc" "$S3_ACCESS_KEY" "$S3_SECRET_KEY" "$S3_ENDPOINT_HOST" "$S3_REGION" "$S3_BUCKET" "$S3_PREFIX"
fi
fi
done
# Replication flag was decided earlier (requires secondary AND cold buckets).
if [ "$S3_REPLICATION_ENABLED" = true ]; then
cat >> "$MUSEUM_CONFIG" <<'EOF_CFG'
replication:
enabled: true
EOF_CFG
else
cat >> "$MUSEUM_CONFIG" <<'EOF_CFG'
replication:
enabled: false
EOF_CFG
fi
# SMTP, internal flags, and WebAuthn relying-party settings.
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
smtp:
host: "${SMTP_HOST}"
port: "${SMTP_PORT}"
username: "${SMTP_USERNAME}"
password: "${SMTP_PASSWORD}"
email: "${SMTP_EMAIL}"
sender-name: "${SMTP_SENDER_NAME}"
encryption: "${SMTP_ENCRYPTION}"
internal:
silent: false
disable-registration: false
webauthn:
rpid: "$RP_ID"
rporigins:
- "$PHOTOS_URL"
EOF_CFG
# The accounts host is a separate WebAuthn origin only with subdomain routing.
if [ "$USE_SUBDOMAIN_ROUTING" = true ]; then
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
- "$ACCOUNTS_URL"
EOF_CFG
fi
# Server-side encryption / signing secrets generated above.
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
key:
encryption: $MASTER_KEY
hash: $HASH_KEY
jwt:
secret: $JWT_SECRET
sessions:
secret: $SESSION_SECRET
EOF_CFG
# Optional OIDC (Cloudron SSO) wiring — enabled only when all three values exist.
if [ -n "${CLOUDRON_OIDC_CLIENT_ID:-}" ] && [ -n "${CLOUDRON_OIDC_CLIENT_SECRET:-}" ] && [ -n "${CLOUDRON_OIDC_IDENTIFIER:-}" ]; then
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
oidc:
enabled: true
issuer: "${CLOUDRON_OIDC_IDENTIFIER}"
client_id: "${CLOUDRON_OIDC_CLIENT_ID}"
client_secret: "${CLOUDRON_OIDC_CLIENT_SECRET}"
redirect_url: "$API_BASE/api/v1/session/callback"
EOF_CFG
fi
# Admins may append arbitrary YAML via the override file.
MUSEUM_OVERRIDE="$CONFIG_DIR/museum.override.yaml"
if [ -f "$MUSEUM_OVERRIDE" ]; then
log INFO "Applying museum override from $MUSEUM_OVERRIDE"
printf '\n# --- Cloudron override (user-provided) ---\n' >> "$MUSEUM_CONFIG"
cat "$MUSEUM_OVERRIDE" >> "$MUSEUM_CONFIG"
fi
# The rendered config contains credentials — restrict it to the runtime user.
chown cloudron:cloudron "$MUSEUM_CONFIG"
chmod 600 "$MUSEUM_CONFIG"
log INFO "Preparing frontend assets"
# Refresh each built frontend bundle from the read-only image into the data
# volume, where the URL rewrites below can modify it.
if [ -d "$WEB_SOURCE_DIR" ]; then
for app in photos accounts auth cast albums family; do
if [ -d "$WEB_SOURCE_DIR/$app" ]; then
log INFO "Updating $app frontend assets"
rm -rf "$WEB_RUNTIME_DIR/$app"
cp -a "$WEB_SOURCE_DIR/$app" "$WEB_RUNTIME_DIR/$app"
chown -R cloudron:cloudron "$WEB_RUNTIME_DIR/$app"
else
log WARN "Missing built frontend for $app"
fi
done
else
log ERROR "Frontend assets directory missing at $WEB_SOURCE_DIR"
fi
# Rewrite hard-coded upstream URLs inside the built frontend bundles.
# $1: search URL, $2: replacement URL. Handles both the plain form and the
# backslash-escaped form ("https:\/\/...") that appears inside JS/JSON string
# literals. Logs the number of files touched.
rewrite_frontend_reference() {
  local search="$1"
  local replace="$2"
  local count=0
  local file
  if [ -z "$search" ] || [ -z "$replace" ] || [ "$search" = "$replace" ]; then
    return
  fi
  # Literal backslash-escaped form as it appears in file content, e.g.
  # "https:\/\/api.ente.io". Bug fix: the original grep/sed used "\/" in
  # regex patterns, which engines treat as a plain "/", so files containing
  # only the escaped form were silently skipped. grep -F matches the literal
  # bytes instead. (The unused search_json/replace_json locals and the
  # redundant second sed pass are also gone.)
  local search_escaped="${search//\//\\/}"
  while IFS= read -r -d '' file; do
    if LC_ALL=C grep -q -F -e "$search" -e "$search_escaped" "$file" 2>/dev/null; then
      local file_changed=false
      # Plain URL occurrences.
      if sed -i "s|$search|$replace|g" "$file" 2>/dev/null; then
        file_changed=true
      fi
      # Escaped occurrences: "\\" in a sed BRE matches one literal backslash.
      if sed -i "s|${search//\//\\\\/}|${replace//\//\\\\/}|g" "$file" 2>/dev/null; then
        file_changed=true
      fi
      if [ "$file_changed" = true ]; then
        chown cloudron:cloudron "$file"
        count=$((count + 1))
      fi
    fi
  done < <(find "$WEB_RUNTIME_DIR" -type f \( -name "*.js" -o -name "*.json" -o -name "*.html" -o -name "*.css" -o -name "*.txt" \) -print0)
  if [ "$count" -gt 0 ]; then
    log INFO "Replaced '$search' with '$replace' in $count frontend files"
  fi
}
# Inject a bootstrap <script> into an HTML file that pins the API origin in
# window.__ENTE_CONFIG__ and localStorage before the app's own JS runs.
# The embedded Python exits 2 (treated as "skip, no log") when the file is
# already patched or has no <head> tag.
# NOTE(review): the heredoc body as shown lacks Python indentation after
# "try:" / "if:" — presumably stripped in transit; confirm against the
# original file, since it would be a SyntaxError as written.
inject_api_origin_script() {
local file="$1"
python3 - "$file" "$API_ORIGIN" <<'PY'
import sys
from pathlib import Path
path = Path(sys.argv[1])
origin = sys.argv[2]
marker = 'data-ente-config="api-origin"'
try:
data = path.read_text()
except (FileNotFoundError, UnicodeDecodeError):
sys.exit(2)
if marker in data or '<head>' not in data:
sys.exit(2)
snippet = f'<script {marker}>(function(){{try{{var o={origin!r};window.__ENTE_CONFIG__=Object.assign(window.__ENTE_CONFIG__||{{}},{{apiOrigin:o}});if(window.localStorage){{window.localStorage.setItem("apiOrigin",o);}}}}catch(e){{}}}})();</script>'
data = data.replace('<head>', '<head>' + snippet, 1)
path.write_text(data)
PY
local status=$?
if [ "$status" -eq 0 ]; then
log INFO "Injected API origin snippet into ${file#$WEB_RUNTIME_DIR/}"
fi
}
if [ -d "$WEB_RUNTIME_DIR" ]; then
log INFO "Rewriting frontend endpoints for local deployment"
# "search|replacement" pairs. The bare "https://ente.io" entry comes last so
# the more specific *.ente.io hosts are rewritten before the apex domain.
FRONTEND_REPLACEMENTS=(
"ENTE_API_ORIGIN_PLACEHOLDER|$API_ORIGIN"
"ENTE_WEB_ENDPOINT_PLACEHOLDER|$PHOTOS_URL"
"https://api.ente.io|$API_ORIGIN"
"https://accounts.ente.io|$ACCOUNTS_URL"
"https://auth.ente.io|$AUTH_URL"
"https://cast.ente.io|$CAST_URL"
"https://photos.ente.io|$PHOTOS_URL"
"https://web.ente.io|$PHOTOS_URL"
"https://albums.ente.io|$ALBUMS_URL"
"https://family.ente.io|$FAMILY_URL"
"https://ente.io|$PHOTOS_URL"
)
OLD_IFS="$IFS"
for entry in "${FRONTEND_REPLACEMENTS[@]}"; do
IFS="|" read -r search replace <<<"$entry"
rewrite_frontend_reference "$search" "$replace"
done
IFS="$OLD_IFS"
log INFO "Injecting API origin bootstrap script for frontends"
# || true: the injector deliberately skips already-patched / head-less files.
while IFS= read -r -d '' html_file; do
inject_api_origin_script "$html_file" || true
done < <(find "$WEB_RUNTIME_DIR" -type f -name "*.html" -print0)
fi
log INFO "Ensuring CLI configuration"
# Persistent state for the ente CLI, kept on the data volume across updates.
CLI_DATA_DIR="$DATA_DIR/cli-data"
mkdir -p "$CLI_DATA_DIR"
chown cloudron:cloudron "$CLI_DATA_DIR"
chmod 700 "$CLI_DATA_DIR"
CLI_EXPORT_DIR="$CLI_DATA_DIR/export"
if [ ! -d "$CLI_EXPORT_DIR" ]; then
mkdir -p "$CLI_EXPORT_DIR"
chown cloudron:cloudron "$CLI_EXPORT_DIR"
chmod 700 "$CLI_EXPORT_DIR"
fi
# Seed the CLI config once, pointing it at this install's API endpoint.
CLI_DATA_CONFIG="$CLI_DATA_DIR/config.yaml"
if [ ! -f "$CLI_DATA_CONFIG" ]; then
cat > "$CLI_DATA_CONFIG" <<EOF_CLI_DATA
endpoint:
api: ${API_ORIGIN}
log:
http: false
EOF_CLI_DATA
chown cloudron:cloudron "$CLI_DATA_CONFIG"
chmod 600 "$CLI_DATA_CONFIG"
log INFO "Initialised CLI data configuration at $CLI_DATA_CONFIG"
fi
# The CLI looks in ~/.ente/config.yaml; symlink it to the persisted copy.
CLI_HOME="$DATA_DIR/home/.ente"
mkdir -p "$CLI_HOME"
if [ ! -f "$CLI_HOME/config.yaml" ]; then
ln -sf "$CLI_DATA_CONFIG" "$CLI_HOME/config.yaml"
fi
chown -R cloudron:cloudron "$DATA_DIR/home" "$CLI_HOME"
chmod 700 "$DATA_DIR/home"
# --- Reverse proxy / static web configuration --------------------------------
# Render the Caddyfile that listens on :3080 (the app's HTTP port). Both
# variants proxy Museum API traffic to localhost:8080 and serve the static web
# apps from $WEB_RUNTIME_DIR; they differ only in how the web apps are
# addressed (per-app subdomains vs. URL path prefixes).
log INFO "Rendering Caddy configuration"
if [ "$USE_SUBDOMAIN_ROUTING" = true ]; then
# Subdomain routing: each web app (photos, accounts, auth, cast, albums,
# family) is matched by its own hostname. NOTE: the heredoc delimiter is
# unquoted, so ${PHOTOS_HOST}-style and $WEB_RUNTIME_DIR references are
# expanded by the shell at render time; Caddy placeholders use {curly}
# syntax and pass through untouched.
cat > "$CADDY_CONFIG" <<EOF_CADDY
{
admin off
auto_https off
}
:3080 {
log {
level INFO
output stdout
format filter {
wrap json
fields {
request>remote_ip replace {http.request.header.X-Real-Ip}
request>headers>X-Real-Ip delete
}
}
}
encode gzip
@options {
method OPTIONS
header Origin *
}
handle @options {
header Access-Control-Allow-Origin "{http.request.header.Origin}"
header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
header Access-Control-Allow-Headers "*"
header Access-Control-Allow-Credentials "true"
header Access-Control-Max-Age "3600"
header Vary "Origin"
respond 204
}
handle_path /api/* {
@api_cors_subdomain header Origin *
header @api_cors_subdomain {
Access-Control-Allow-Origin {http.request.header.Origin}
Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
Access-Control-Allow-Headers "*"
Access-Control-Allow-Credentials "true"
Vary "Origin"
defer
}
reverse_proxy localhost:8080 {
trusted_proxies private_ranges
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /health {
rewrite * /ping
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /ping {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /public/* {
reverse_proxy localhost:8080
}
@user_api path_regexp user_api ^/users($|/.*)
handle @user_api {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
@museum_api_get {
method GET HEAD
path_regexp museum_api_get ^/(admin|authenticator|billing|cast|collections|custom-domain|diff|discount|email-hash|emails-from-hashes|emergency-contacts|family|file|file-link|files|fire|info|job|mail|metrics|multipart-upload-urls|offers|options|pass-info|passkeys|public-collection|push|queue|remote-store|storage-bonus|thumbnail|trash|unknown-api|upload-urls|user|user-entity|verify-password)(/|$)
}
handle @museum_api_get {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
@write_methods {
not method GET
not method HEAD
}
handle @write_methods {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
@photos_host host ${PHOTOS_HOST}
handle @photos_host {
root * $WEB_RUNTIME_DIR/photos
try_files {path} {path}/index.html {path}.html index.html
file_server
}
@accounts_host host ${ACCOUNTS_HOST}
handle @accounts_host {
root * $WEB_RUNTIME_DIR/accounts
try_files {path} {path}/index.html {path}.html index.html
file_server
}
@auth_host host ${AUTH_HOST}
handle @auth_host {
root * $WEB_RUNTIME_DIR/auth
try_files {path} {path}/index.html {path}.html index.html
file_server
}
@cast_host host ${CAST_HOST}
handle @cast_host {
root * $WEB_RUNTIME_DIR/cast
try_files {path} {path}/index.html {path}.html index.html
file_server
}
@albums_host host ${ALBUMS_HOST}
handle @albums_host {
root * $WEB_RUNTIME_DIR/albums
try_files {path} {path}/index.html {path}.html index.html
file_server
}
@family_host host ${FAMILY_HOST}
handle @family_host {
root * $WEB_RUNTIME_DIR/family
try_files {path} {path}/index.html {path}.html index.html
file_server
}
handle {
respond "Not Found" 404
}
}
EOF_CADDY
else
# Path-based routing: one hostname serves everything. API routes are matched
# first (path prefixes, the Museum GET allowlist regex, and all non-GET/HEAD
# methods); the web apps live under /auth, /accounts, /cast, /family,
# /albums, /photos, with /photos as the fallback app for unmatched paths.
cat > "$CADDY_CONFIG" <<EOF_CADDY
{
admin off
auto_https off
}
:3080 {
log {
level INFO
output stdout
format filter {
wrap json
fields {
request>remote_ip replace {http.request.header.X-Real-Ip}
request>headers>X-Real-Ip delete
}
}
}
encode gzip
@options {
method OPTIONS
header Origin *
}
handle @options {
header Access-Control-Allow-Origin "{http.request.header.Origin}"
header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
header Access-Control-Allow-Headers "*"
header Access-Control-Allow-Credentials "true"
header Access-Control-Max-Age "3600"
header Vary "Origin"
respond 204
}
handle_path /api/* {
@api_cors_path header Origin *
header @api_cors_path {
Access-Control-Allow-Origin {http.request.header.Origin}
Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
Access-Control-Allow-Headers "*"
Access-Control-Allow-Credentials "true"
Vary "Origin"
defer
}
reverse_proxy localhost:8080 {
trusted_proxies private_ranges
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /health {
rewrite * /ping
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /ping {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /public/* {
reverse_proxy localhost:8080
}
@user_api_path path_regexp user_api_path ^/users($|/.*)
handle @user_api_path {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
@museum_api_get_path {
method GET HEAD
path_regexp museum_api_get_path ^/(admin|authenticator|billing|cast|collections|custom-domain|diff|discount|email-hash|emails-from-hashes|emergency-contacts|family|file|file-link|files|fire|info|job|mail|metrics|multipart-upload-urls|offers|options|pass-info|passkeys|public-collection|push|queue|remote-store|storage-bonus|thumbnail|trash|unknown-api|upload-urls|user|user-entity|verify-password)(/|$)
}
handle @museum_api_get_path {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
@write_methods_path {
not method GET
not method HEAD
}
handle @write_methods_path {
reverse_proxy localhost:8080 {
header_up Host {http.request.host}
header_up X-Real-IP {http.request.header.X-Forwarded-For}
header_up X-Forwarded-For {http.request.header.X-Forwarded-For}
header_up X-Forwarded-Proto {http.request.header.X-Forwarded-Proto}
}
}
handle /_next/* {
root * $WEB_RUNTIME_DIR
try_files {path} auth{path} accounts{path} photos{path} cast{path} albums{path} family{path}
file_server
}
handle /images/* {
root * $WEB_RUNTIME_DIR/photos
file_server
}
handle /auth/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /auth/index.html
file_server
}
handle /accounts/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /accounts/index.html
file_server
}
handle /cast/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /cast/index.html
file_server
}
handle /family/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /family/index.html
file_server
}
handle /albums/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /albums/index.html
file_server
}
handle /photos/* {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /photos/index.html
file_server
}
handle {
root * $WEB_RUNTIME_DIR
try_files {path} {path}/index.html /photos/index.html
file_server
}
}
EOF_CADDY
fi
chown cloudron:cloudron "$CADDY_CONFIG"
log INFO "Validating Caddy configuration"
# Validate the rendered Caddyfile before anything starts; on failure, surface
# the validator's captured output so the bad directive is visible in the logs.
caddy validate --config "$CADDY_CONFIG" > "$TMP_DIR/caddy-validate.log" 2>&1 || {
cat "$TMP_DIR/caddy-validate.log"
log ERROR "Caddy configuration validation failed"
exit 1
}
log INFO "Testing PostgreSQL connectivity"
# Fail fast if the Cloudron PostgreSQL addon is unreachable; a trivial
# SELECT is enough to prove credentials and network both work.
PGPASSWORD="$CLOUDRON_POSTGRESQL_PASSWORD" psql \
-h "$CLOUDRON_POSTGRESQL_HOST" \
-p "$CLOUDRON_POSTGRESQL_PORT" \
-U "$CLOUDRON_POSTGRESQL_USERNAME" \
-d "$CLOUDRON_POSTGRESQL_DATABASE" \
-c "SELECT 1" >/dev/null 2>&1 || {
log ERROR "Unable to connect to PostgreSQL"
exit 1
}
if [ "$S3_NOT_CONFIGURED" = "true" ]; then
# No usable S3 credentials: instead of letting Museum crash-loop, serve a
# static "configuration required" page with setup instructions.
log WARN "S3 not configured - creating configuration page"
mkdir -p "$WEB_RUNTIME_DIR/config"
# Quoted delimiter ('EOF_CONFIG'): the HTML below is written literally,
# with no shell expansion.
cat > "$WEB_RUNTIME_DIR/config/index.html" <<'EOF_CONFIG'
<!DOCTYPE html>
<html>
<head>
<title>Ente Configuration Required</title>
<style>
body { font-family: sans-serif; max-width: 800px; margin: 50px auto; padding: 20px; }
h1 { color: #2d2d2d; }
code { background: #f4f4f4; padding: 2px 6px; border-radius: 3px; }
pre { background: #f4f4f4; padding: 15px; border-radius: 5px; overflow-x: auto; }
.warning { background: #fff3cd; border-left: 4px solid #ffc107; padding: 15px; margin: 20px 0; }
</style>
</head>
<body>
<h1>Ente Configuration Required</h1>
<div class="warning">
<strong>S3 Storage Not Configured</strong>
<p>Ente requires S3-compatible object storage to function. Please configure your S3 credentials.</p>
</div>
<h2>Configuration Steps</h2>
<ol>
<li>Open the Cloudron dashboard</li>
<li>Go to your Ente app and open the Terminal</li>
<li>Edit <code>/app/data/config/s3.env</code>:
<pre>nano /app/data/config/s3.env</pre>
</li>
<li>Add your S3 credentials:
<pre>S3_ENDPOINT=https://your-s3-endpoint.com
S3_REGION=your-region
S3_BUCKET=your-bucket-name
S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key</pre>
</li>
<li>Save the file and restart the app from the Cloudron dashboard</li>
</ol>
<p>For more information, see the <a href="https://help.ente.io/self-hosting/guides/external-s3">Ente S3 Configuration Guide</a>.</p>
</body>
</html>
EOF_CONFIG
chown -R cloudron:cloudron "$WEB_RUNTIME_DIR/config"
log INFO "Starting Caddy in configuration mode"
# Plain static file server on the app's HTTP port, as the unprivileged user.
setpriv --reuid=cloudron --regid=cloudron --init-groups caddy file-server --listen :3080 --root "$WEB_RUNTIME_DIR/config" &
CADDY_PID=$!
# No Museum process in configuration mode; the wait loop below keys off this.
MUSEUM_PID=""
else
log INFO "Starting Museum server and Caddy"
# Run Museum from its runtime directory — presumably so it resolves
# configurations/local.yaml relative to cwd (see MUSEUM_CONFIG above);
# TODO confirm against the Museum docs. stdbuf -oL keeps its output
# line-buffered through the tee pipeline set up at script start.
setpriv --reuid=cloudron --regid=cloudron --init-groups /bin/bash -lc "cd '$MUSEUM_RUNTIME_DIR' && exec stdbuf -oL '$MUSEUM_BIN'" &
MUSEUM_PID=$!
# --watch makes Caddy reload automatically if the rendered config changes.
setpriv --reuid=cloudron --regid=cloudron --init-groups caddy run --config "$CADDY_CONFIG" --watch &
CADDY_PID=$!
fi
# Stop the managed services: signal both first, then reap each one so the
# shutdown path never leaves a zombie behind. Safe to call when either PID
# is empty (configuration mode has no Museum) or already dead.
terminate() {
log INFO "Shutting down services"
local svc_pid
for svc_pid in "$MUSEUM_PID" "$CADDY_PID"; do
[ -n "$svc_pid" ] && kill "$svc_pid" 2>/dev/null || true
done
for svc_pid in "$MUSEUM_PID" "$CADDY_PID"; do
[ -n "$svc_pid" ] && wait "$svc_pid" 2>/dev/null || true
done
}
# Forward TERM/INT (Cloudron stop/restart) to the child services.
trap terminate TERM INT
# Block until one of the managed services exits, and capture its status
# explicitly. BUG FIX: under `set -e` a bare `wait` on a crashed service
# returned non-zero and aborted the script right here — `terminate` never
# ran (orphaning the sibling process), the error was never logged, and on
# the success path `EXIT_CODE=$?` was always 0.
EXIT_CODE=0
if [ -n "$MUSEUM_PID" ]; then
# `wait -n` with explicit PIDs (bash >= 5.1) returns when the first of
# the two processes exits.
wait -n "$MUSEUM_PID" "$CADDY_PID" || EXIT_CODE=$?
else
# Configuration mode: only Caddy is running.
wait "$CADDY_PID" || EXIT_CODE=$?
fi
# One service is gone; tear down the other and propagate the failing status.
terminate
log ERROR "Service exited unexpectedly"
exit "$EXIT_CODE"