Inject API origin bootstrap and update docs

Andreas Dueren
2025-11-18 21:05:58 -06:00
parent 3b7a853c71
commit f29b570b82
5 changed files with 95 additions and 19 deletions

View File

@@ -2,6 +2,16 @@
# Changelog
## 0.5.7 (2025-11-18)
* Inject the API origin into all served HTML so the Next.js bundles (including `accounts/passkeys`) read the self-hosted endpoint instead of defaulting to `https://api.ente.io`
* Document the working Backblaze B2 CORS JSON that whitelists the wildcard origin + upload operations for desktop casts
## 0.5.6 (2025-11-18)
* Allow the accounts frontend origin in Museum's `webauthn.rporigins` when subdomain routing is enabled so passkey enrollment via the desktop flow succeeds
* Document the Ente desktop scheme (`ente://app`) in the recommended S3 CORS rules to keep signed URL fetches working for the desktop client
## 0.5.5 (2025-11-18)
* Validate S3 data-center identifiers so replication only uses the canonical `b2-eu-cen`/`wasabi-eu-central-2-v3`/`scw-eu-fr-v3` keys and update the docs to reflect the upstream requirements

View File

@@ -7,7 +7,7 @@
"contactEmail": "contact@ente.io",
"website": "https://ente.io",
"tagline": "Open source, end-to-end encrypted photo backup",
"version": "0.5.5",
"version": "0.5.7",
"upstreamVersion": "git-main",
"healthCheckPath": "/health",
"httpPort": 3080,

View File

@@ -14,35 +14,35 @@ Before using Ente, configure an S3-compatible object storage provider:
# Authorise once (replace with your key ID/secret)
b2 authorize-account <KEY_ID> <APP_KEY>
# Clear any native rules
b2 bucket update --cors-rules '[]' ente-due-ren allPublic
# Inspect the current bucket type (usually allPrivate) and capture it
BUCKET_TYPE=$(b2 get-bucket ente-due-ren | awk -F'"' '/bucketType/ {print $4}')
# Clear any native rules without changing visibility
b2 update-bucket ente-due-ren "$BUCKET_TYPE" --cors-rules '[]'
# Apply the S3-compatible rule (adjust origins as needed)
cat >cors.json <<'EOF'
[
{
"corsRuleName": "ente-web",
"allowedOrigins": [
"https://ente.due.ren",
"https://accounts.due.ren",
"https://auth.due.ren",
"https://albums.due.ren",
"https://cast.due.ren",
"https://family.due.ren"
],
"corsRuleName": "entephotos",
"allowedOrigins": ["*"],
"allowedHeaders": ["*"],
"allowedOperations": [
"s3_get_object",
"s3_head_object",
"b2_download_file_by_id",
"b2_download_file_by_name",
"b2_download_file_by_id"
"b2_upload_file",
"b2_upload_part",
"s3_get",
"s3_post",
"s3_put",
"s3_head"
],
"exposeHeaders": ["ETag","Content-Length","Content-Type"],
"maxAgeSeconds": 86400
"exposeHeaders": ["X-Amz-Request-Id","X-Amz-Id-2","ETag"],
"maxAgeSeconds": 3600
}
]
EOF
b2 bucket update --cors-rules "$(<cors.json)" ente-due-ren allPublic
b2 update-bucket ente-due-ren "$BUCKET_TYPE" --cors-rules "$(<cors.json)"
```
Verify with `curl -I -H 'Origin: https://ente.due.ren' <signed-url>`; you should see `Access-Control-Allow-Origin`.
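For reference, a healthy response looks roughly like the following; the header values are illustrative and depend on your bucket settings, and `<signed-url>` stays a placeholder until Museum issues a real one:

```bash
# Sanity check against a signed URL issued by Museum (placeholder URL).
curl -I -H 'Origin: https://ente.due.ren' '<signed-url>'
# A correctly configured bucket answers with something like:
#   HTTP/1.1 200 OK
#   Access-Control-Allow-Origin: *
#   Access-Control-Expose-Headers: X-Amz-Request-Id, X-Amz-Id-2, ETag
```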

View File

@@ -60,7 +60,32 @@ After installing on Cloudron remember to:
- `S3_FORCE_PATH_STYLE=true` translates to `use_path_style_urls=true` (required for R2/MinIO and most LAN storage).
- The data-center identifiers (`b2-eu-cen`, `wasabi-eu-central-2-v3`, `scw-eu-fr-v3`, etc.) are **hard-coded upstream**. Keep the defaults unless you know you are targeting one of the legacy names (as listed in the Ente docs). The start script will ignore unknown values to prevent replication from breaking with empty bucket names.
- Leave the generated `museum/configurations/local.yaml` alone; if you need to append extra settings, do so via `/app/data/config/museum.override.yaml` and only add the keys you actually want to change. Copy-pasting the full sample `s3:` block from the docs will overwrite the generated credentials with blanks.
- If you are using Cloudflare R2 or another hosted S3 provider, configure your bucket's CORS policy to allow the Ente frontends (e.g. `https://ente.due.ren`, `https://accounts.due.ren`, `https://cast.due.ren`, etc.) so that cast/slideshow playback can fetch signed URLs directly from storage. Backblaze B2 also requires clearing its “native” CORS rules; see the script in `POSTINSTALL.md`.
- If you are using Cloudflare R2 or another hosted S3 provider, configure your bucket's CORS policy to allow the Ente frontends (e.g. `https://ente.due.ren`, `https://accounts.due.ren`, `https://cast.due.ren`, **and** the desktop scheme `ente://app`) so that cast/slideshow playback and the desktop client can fetch signed URLs directly from storage. Backblaze B2 also requires clearing its “native” CORS rules; see the script in `POSTINSTALL.md`. When using the Backblaze CLI, remember to preserve your bucket visibility (`allPrivate` for most installs): run `b2 get-bucket <bucket>` to confirm the current type, then invoke `b2 update-bucket <bucket> <bucketType> --cors-rules "$(<cors.json)"` so you only touch the CORS block. A minimal rule that works with Ente's signed URLs looks like:
```bash
cat <<'EOF' >cors.json
[
{
"corsRuleName": "entephotos",
"allowedOrigins": ["*"],
"allowedHeaders": ["*"],
"allowedOperations": [
"b2_download_file_by_id",
"b2_download_file_by_name",
"b2_upload_file",
"b2_upload_part",
"s3_get",
"s3_post",
"s3_put",
"s3_head"
],
"exposeHeaders": ["X-Amz-Request-Id","X-Amz-Id-2","ETag"],
"maxAgeSeconds": 3600
}
]
EOF
b2 update-bucket ente-due-ren allPrivate --cors-rules "$(<cors.json)"
```
Adjust the allowed origins and bucket type as needed; afterwards verify with `curl -I -H 'Origin: https://cast.example.com' '<signed-url>'` and ensure `Access-Control-Allow-Origin` is present. A quick preflight check for uploads is sketched below.
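The rule above also whitelists the upload operations, so you can optionally confirm that upload preflights pass; a sketch, assuming your storage provider answers `OPTIONS` preflights on presigned upload URLs (the URL is a placeholder and the headers shown are illustrative):

```bash
# Optional: check an upload preflight (placeholder URL; adjust the origin).
curl -i -X OPTIONS '<signed-upload-url>' \
  -H 'Origin: https://cast.example.com' \
  -H 'Access-Control-Request-Method: PUT' \
  -H 'Access-Control-Request-Headers: content-type'
# Expect a 2xx response containing Access-Control-Allow-Origin and an
# Access-Control-Allow-Methods header that lists PUT.
```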
2. When prompted during installation, pick hostnames for the Accounts/Auth/Cast/Albums/Family web apps (they are exposed via Cloudron `httpPorts`). Ensure matching DNS records exist; Cloudron-managed DNS creates them automatically; otherwise, point CNAME/A records such as `accounts.<app-domain>` at the primary hostname.
3. To persist tweaks to Museum (for example, seeding super-admin or whitelist entries), create `/app/data/config/museum.override.yaml`. Its contents are appended to the generated `museum/configurations/local.yaml` on every start, so you only need to declare the keys you want to override.
```yaml

View File

@@ -756,6 +756,15 @@ webauthn:
    rpid: "$RP_ID"
    rporigins:
        - "$PHOTOS_URL"
EOF_CFG
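# When subdomain routing is enabled, also whitelist the accounts origin for
# WebAuthn so passkey enrollment via the desktop flow succeeds (changelog 0.5.6).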
if [ "$USE_SUBDOMAIN_ROUTING" = true ]; then
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
- "$ACCOUNTS_URL"
EOF_CFG
fi
cat >> "$MUSEUM_CONFIG" <<EOF_CFG
key:
encryption: $MASTER_KEY
@@ -854,6 +863,33 @@ rewrite_frontend_reference() {
    fi
}
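# Inject a small bootstrap <script> into each served HTML page so the Next.js
# bundles (including accounts/passkeys) pick up the self-hosted API origin
# instead of defaulting to https://api.ente.io (changelog 0.5.7).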
inject_api_origin_script() {
    local file="$1"
    python3 - "$file" "$API_ORIGIN" <<'PY'
import sys
from pathlib import Path
path = Path(sys.argv[1])
origin = sys.argv[2]
marker = 'data-ente-config="api-origin"'
try:
    data = path.read_text()
except (FileNotFoundError, UnicodeDecodeError):
    sys.exit(2)
if marker in data or '<head>' not in data:
    sys.exit(2)
snippet = f'<script {marker}>(function(){{try{{var o={origin!r};window.__ENTE_CONFIG__=Object.assign(window.__ENTE_CONFIG__||{{}},{{apiOrigin:o}});if(window.localStorage){{window.localStorage.setItem("apiOrigin",o);}}}}catch(e){{}}}})();</script>'
data = data.replace('<head>', '<head>' + snippet, 1)
path.write_text(data)
PY
    local status=$?
    if [ "$status" -eq 0 ]; then
        log INFO "Injected API origin snippet into ${file#$WEB_RUNTIME_DIR/}"
    fi
}
if [ -d "$WEB_RUNTIME_DIR" ]; then
log INFO "Rewriting frontend endpoints for local deployment"
FRONTEND_REPLACEMENTS=(
@@ -874,6 +910,11 @@ if [ -d "$WEB_RUNTIME_DIR" ]; then
        rewrite_frontend_reference "$search" "$replace"
    done
    IFS="$OLD_IFS"
    log INFO "Injecting API origin bootstrap script for frontends"
    while IFS= read -r -d '' html_file; do
        inject_api_origin_script "$html_file" || true
    done < <(find "$WEB_RUNTIME_DIR" -type f -name "*.html" -print0)
fi
log INFO "Ensuring CLI configuration"