Commit remaining Burrow platform work
parent fff5475914
commit 7f280c08cf
48 changed files with 2508 additions and 1864 deletions
Tools/forwardemail-custom-s3.sh (171 lines, new executable file)
@@ -0,0 +1,171 @@
#!/usr/bin/env bash

set -euo pipefail
umask 077

usage() {
  cat <<'EOF'
Usage:
  Tools/forwardemail-custom-s3.sh \
    --domain burrow.net \
    --api-token-file intake/forwardemail_api_token.txt \
    --s3-endpoint https://<endpoint> \
    --s3-region <region> \
    --s3-bucket <bucket> \
    --s3-access-key-file intake/hetzner-s3-user.txt \
    --s3-secret-key-file intake/hetzner-s3-secret.txt

Options:
  --domain <domain>            Forward Email domain to update.
  --api-token-file <path>      File containing the Forward Email API token.
  --s3-endpoint <url>          S3-compatible endpoint URL.
  --s3-region <region>         S3 region string expected by Forward Email.
  --s3-bucket <name>           Bucket used for alias backup uploads.
  --s3-access-key-file <path>  File containing the S3 access key id.
  --s3-secret-key-file <path>  File containing the S3 secret access key.
  --test-only                  Skip the update call and only test the saved connection.
  --help                       Show this help text.

Notes:
  - Secrets are passed to curl through a temporary config file to avoid putting
    them in the process list.
  - By default the script updates the domain settings and then calls
    /test-s3-connection.
  - For Hetzner Object Storage, use the regional S3 endpoint such as
    https://hel1.your-objectstorage.com, not an account alias endpoint.
EOF
}

fail() {
  printf 'error: %s\n' "$*" >&2
  exit 1
}

require_file() {
  local path="$1"
  [[ -f "$path" ]] || fail "missing file: $path"
}

# Read a one-line secret from a file, stripping CR/LF so trailing newlines
# never leak into the API payload.
read_secret() {
  local path="$1"
  local value
  value="$(tr -d '\r\n' < "$path")"
  [[ -n "$value" ]] || fail "empty secret file: $path"
  printf '%s' "$value"
}

domain=""
api_token_file=""
s3_endpoint=""
s3_region=""
s3_bucket=""
s3_access_key_file=""
s3_secret_key_file=""
test_only=false

while [[ $# -gt 0 ]]; do
  case "$1" in
    --domain)
      domain="${2:-}"
      shift 2
      ;;
    --api-token-file)
      api_token_file="${2:-}"
      shift 2
      ;;
    --s3-endpoint)
      s3_endpoint="${2:-}"
      shift 2
      ;;
    --s3-region)
      s3_region="${2:-}"
      shift 2
      ;;
    --s3-bucket)
      s3_bucket="${2:-}"
      shift 2
      ;;
    --s3-access-key-file)
      s3_access_key_file="${2:-}"
      shift 2
      ;;
    --s3-secret-key-file)
      s3_secret_key_file="${2:-}"
      shift 2
      ;;
    --test-only)
      test_only=true
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      fail "unknown argument: $1"
      ;;
  esac
done

[[ -n "$domain" ]] || fail "--domain is required"
[[ -n "$api_token_file" ]] || fail "--api-token-file is required"
[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set"
[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set"
[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set"
[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set"
[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set"

require_file "$api_token_file"
api_token="$(read_secret "$api_token_file")"

if [[ "$test_only" == false ]]; then
  require_file "$s3_access_key_file"
  require_file "$s3_secret_key_file"
  s3_access_key_id="$(read_secret "$s3_access_key_file")"
  s3_secret_access_key="$(read_secret "$s3_secret_key_file")"

  case "$s3_endpoint" in
    http://*|https://*)
      ;;
    *)
      fail "--s3-endpoint must start with http:// or https://"
      ;;
  esac
fi

# Credentials go into a private temp file (protected by the umask 077 above)
# that curl reads via --config, so they never show up in the process list.
curl_config="$(mktemp)"
trap 'rm -f "$curl_config"' EXIT

if [[ "$test_only" == false ]]; then
  cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}"
request = "PUT"
user = "${api_token}:"
data = "has_custom_s3=true"
data-urlencode = "s3_endpoint=${s3_endpoint}"
data-urlencode = "s3_access_key_id=${s3_access_key_id}"
data-urlencode = "s3_secret_access_key=${s3_secret_access_key}"
data-urlencode = "s3_region=${s3_region}"
data-urlencode = "s3_bucket=${s3_bucket}"
EOF

  printf 'Configuring Forward Email custom S3 for %s\n' "$domain" >&2
  curl --config "$curl_config"
  printf '\n' >&2
fi

cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}/test-s3-connection"
request = "POST"
user = "${api_token}:"
EOF

printf 'Testing Forward Email custom S3 for %s\n' "$domain" >&2
curl --config "$curl_config"
printf '\n' >&2
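A typical first run against Hetzner Object Storage would look roughly like the sketch below. The bucket name burrow-forwardemail-backups is an illustrative placeholder, not a value taken from this commit; the endpoint and region follow the Hetzner example in the Notes above.

  # bucket name below is a placeholder; substitute the bucket provisioned for alias backups
  Tools/forwardemail-custom-s3.sh \
    --domain burrow.net \
    --api-token-file intake/forwardemail_api_token.txt \
    --s3-endpoint https://hel1.your-objectstorage.com \
    --s3-region hel1 \
    --s3-bucket burrow-forwardemail-backups \
    --s3-access-key-file intake/hetzner-s3-user.txt \
    --s3-secret-key-file intake/hetzner-s3-secret.txt

Re-running later with only --domain, --api-token-file, and --test-only exercises the saved connection via /test-s3-connection without rewriting the domain settings.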
Tools/forwardemail-hetzner-storage.py (261 lines, new executable file)
@@ -0,0 +1,261 @@
#!/usr/bin/env python3

from __future__ import annotations

import argparse
import datetime as dt
import hashlib
import hmac
import sys
import textwrap
from pathlib import Path
from urllib.parse import urlencode, urlparse

import requests


def read_secret(path: str) -> str:
    value = Path(path).read_text(encoding="utf-8").strip()
    if not value:
        raise SystemExit(f"error: empty secret file: {path}")
    return value


def sign(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()


def request(
    *,
    method: str,
    endpoint: str,
    region: str,
    access_key: str,
    secret_key: str,
    bucket: str,
    query: dict[str, str] | None = None,
    body: bytes = b"",
    content_type: str | None = None,
) -> requests.Response:
    # Build and send a path-style S3 request signed with AWS Signature V4.
    parsed = urlparse(endpoint)
    if parsed.scheme != "https":
        raise SystemExit("error: endpoint must use https")

    host = parsed.netloc
    canonical_uri = f"/{bucket}"
    query = query or {}
    canonical_querystring = urlencode(sorted(query.items()), doseq=True, safe="~")

    now = dt.datetime.now(dt.timezone.utc)
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = now.strftime("%Y%m%d")
    payload_hash = hashlib.sha256(body).hexdigest()

    headers = {
        "host": host,
        "x-amz-content-sha256": payload_hash,
        "x-amz-date": amz_date,
    }
    if content_type:
        headers["content-type"] = content_type

    signed_headers = ";".join(sorted(headers.keys()))
    canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys()))
    canonical_request = "\n".join(
        [
            method,
            canonical_uri,
            canonical_querystring,
            canonical_headers,
            signed_headers,
            payload_hash,
        ]
    )

    algorithm = "AWS4-HMAC-SHA256"
    credential_scope = f"{date_stamp}/{region}/s3/aws4_request"
    string_to_sign = "\n".join(
        [
            algorithm,
            amz_date,
            credential_scope,
            hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
        ]
    )

    # SigV4 key derivation chain: date, then region, then service, then aws4_request.
    k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, "s3")
    signing_key = sign(k_service, "aws4_request")
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()

    auth_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )

    url = f"{parsed.scheme}://{host}{canonical_uri}"
    if canonical_querystring:
        url = f"{url}?{canonical_querystring}"

    response = requests.request(
        method,
        url,
        headers={**headers, "Authorization": auth_header},
        data=body,
        timeout=30,
    )
    return response


def ensure_bucket(args: argparse.Namespace, bucket: str) -> None:
    # HEAD first: create the bucket only if it does not already exist.
    head = request(
        method="HEAD",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
    )
    if head.status_code == 200:
        print(f"{bucket}: exists")
        return
    if head.status_code != 404:
        raise SystemExit(f"error: HEAD {bucket} returned {head.status_code}: {head.text[:200]}")

    body = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
          <LocationConstraint>{args.region}</LocationConstraint>
        </CreateBucketConfiguration>
        """
    ).encode("utf-8")
    create = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        body=body,
        content_type="application/xml",
    )
    if create.status_code not in (200, 204):
        raise SystemExit(f"error: PUT {bucket} returned {create.status_code}: {create.text[:200]}")
    print(f"{bucket}: created")


def put_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    body = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
          <Rule>
            <ID>expire-forwardemail-backups-after-{args.expire_days}-days</ID>
            <Status>Enabled</Status>
            <Filter>
              <Prefix></Prefix>
            </Filter>
            <Expiration>
              <Days>{args.expire_days}</Days>
            </Expiration>
          </Rule>
        </LifecycleConfiguration>
        """
    ).encode("utf-8")
    response = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
        body=body,
        content_type="application/xml",
    )
    if response.status_code not in (200, 204):
        raise SystemExit(
            f"error: PUT lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
        )
    print(f"{bucket}: lifecycle set to {args.expire_days} days")


def get_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    response = request(
        method="GET",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
    )
    if response.status_code != 200:
        raise SystemExit(
            f"error: GET lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
        )
    print(f"=== {bucket} lifecycle ===")
    print(response.text.strip())


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Provision Hetzner object-storage buckets for Forward Email backups."
    )
    parser.add_argument(
        "--endpoint",
        default="https://hel1.your-objectstorage.com",
        help="Public S3-compatible endpoint URL. For Hetzner, use the regional endpoint, not the account alias.",
    )
    parser.add_argument("--region", default="hel1", help="S3 region.")
    parser.add_argument(
        "--access-key-file",
        default="intake/hetzner-s3-user.txt",
        help="File containing the S3 access key id.",
    )
    parser.add_argument(
        "--secret-key-file",
        default="intake/hetzner-s3-secret.txt",
        help="File containing the S3 secret key.",
    )
    parser.add_argument(
        "--bucket",
        action="append",
        required=True,
        help="Bucket to provision. Repeat for multiple buckets.",
    )
    parser.add_argument(
        "--expire-days",
        type=int,
        default=90,
        help="Lifecycle expiry window in days.",
    )
    parser.add_argument(
        "--verify-only",
        action="store_true",
        help="Skip create/update and only read the current lifecycle.",
    )
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    args.access_key = read_secret(args.access_key_file)
    args.secret_key = read_secret(args.secret_key_file)

    for bucket in args.bucket:
        if args.verify_only:
            get_lifecycle(args, bucket)
            continue
        ensure_bucket(args, bucket)
        put_lifecycle(args, bucket)
        get_lifecycle(args, bucket)


if __name__ == "__main__":
    try:
        main()
    except requests.RequestException as err:
        raise SystemExit(f"error: request failed: {err}") from err
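To provision the backing bucket before pointing Forward Email at it, the intended workflow would look like the sketch below. As before, burrow-forwardemail-backups is a placeholder bucket name; --endpoint, --region, and the key-file paths fall back to the parser defaults above.

  # placeholder bucket; endpoint/region/key files use the defaults above
  Tools/forwardemail-hetzner-storage.py --bucket burrow-forwardemail-backups

  # read back the lifecycle rule without creating or modifying anything
  Tools/forwardemail-hetzner-storage.py --bucket burrow-forwardemail-backups --verify-only

Repeating --bucket provisions several buckets in one run; each gets the same --expire-days lifecycle rule (90 days by default).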