fix: make media uploads work behind docker
All checks were successful
CI / test (push) Successful in 26s

- add S3_PUBLIC_ENDPOINT_URL for browser-reachable presigned urls

- support both public/internal file url validation

- configure MinIO bucket CORS in minio-init

- update env examples and docs
This commit is contained in:
2026-03-07 22:52:05 +03:00
parent f95a0e9727
commit ffd63018d6
6 changed files with 32 additions and 9 deletions

View File

@@ -27,6 +27,8 @@ MINIO_API_PORT=9000
MINIO_CONSOLE_PORT=9001
S3_REGION=us-east-1
S3_BUCKET_NAME=messenger-media
S3_PUBLIC_ENDPOINT_URL=http://localhost:9000
S3_CORS_ALLOW_ORIGIN=*
S3_PRESIGN_EXPIRE_SECONDS=900
MAX_UPLOAD_SIZE_BYTES=104857600

View File

@@ -15,6 +15,7 @@ POSTGRES_DSN=postgresql+asyncpg://postgres:postgres@localhost:5432/messenger
REDIS_URL=redis://localhost:6379/0
S3_ENDPOINT_URL=http://localhost:9000
S3_PUBLIC_ENDPOINT_URL=http://localhost:9000
S3_ACCESS_KEY=minioadmin
S3_SECRET_KEY=minioadmin
S3_REGION=us-east-1

View File

@@ -36,7 +36,7 @@ celery -A app.celery_app:celery_app worker --loglevel=info
Run full stack (web + api + worker + postgres + redis + minio + mailpit):
1. cp .env.docker.example .env
2. edit `.env` (`SECRET_KEY`, passwords, domain, `S3_PUBLIC_ENDPOINT_URL`)
3. docker compose up -d --build
2. Open:
- Web: http://localhost
@@ -49,3 +49,5 @@ Run full stack (web + api + worker + postgres + redis + minio + mailpit):
Use production override to close internal ports (postgres/redis/minio/mailpit/backend):
docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d --build
For media uploads from browser, `S3_PUBLIC_ENDPOINT_URL` must be reachable by users (for example `https://storage.example.com` or `http://SERVER_IP:9000`).

View File

@@ -20,6 +20,7 @@ class Settings(BaseSettings):
    redis_url: str = "redis://localhost:6379/0"
    s3_endpoint_url: str = "http://localhost:9000"
    s3_public_endpoint_url: str | None = None
    s3_access_key: str = "minioadmin"
    s3_secret_key: str = "minioadmin"
    s3_region: str = "us-east-1"

View File

@@ -38,13 +38,16 @@ def _sanitize_filename(file_name: str) -> str:
def _build_file_url(bucket: str, object_key: str) -> str:
    base = (settings.s3_public_endpoint_url or settings.s3_endpoint_url).rstrip("/")
    encoded_key = quote(object_key)
    return f"{base}/{bucket}/{encoded_key}"


def _allowed_file_url_prefixes() -> tuple[str, ...]:
    endpoints = [settings.s3_endpoint_url]
    if settings.s3_public_endpoint_url:
        endpoints.append(settings.s3_public_endpoint_url)
    return tuple(f"{endpoint.rstrip('/')}/{settings.s3_bucket_name}/" for endpoint in endpoints)


def _validate_media(file_type: str, file_size: int) -> None:
@@ -54,10 +57,10 @@ def _validate_media(file_type: str, file_size: int) -> None:
        raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="File size exceeds limit")


def _get_s3_client(endpoint_url: str):
    return boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=settings.s3_access_key,
        aws_secret_access_key=settings.s3_secret_key,
        region_name=settings.s3_region,
@@ -73,7 +76,8 @@ async def generate_upload_url(payload: UploadUrlRequest) -> UploadUrlResponse:
    bucket = settings.s3_bucket_name
    try:
        presign_endpoint = settings.s3_public_endpoint_url or settings.s3_endpoint_url
        s3_client = _get_s3_client(presign_endpoint)
        upload_url = s3_client.generate_presigned_url(
            "put_object",
            Params={
@@ -103,7 +107,7 @@ async def store_attachment_metadata(
    payload: AttachmentCreateRequest,
) -> AttachmentRead:
    _validate_media(payload.file_type, payload.file_size)
    if not payload.file_url.startswith(_allowed_file_url_prefixes()):
        raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Invalid file URL")
    message = await get_message_by_id(db, payload.message_id)

View File

@@ -13,6 +13,7 @@ x-app-env: &app-env
POSTGRES_DSN: postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@postgres:5432/${POSTGRES_DB:-messenger}
REDIS_URL: redis://redis:6379/0
S3_ENDPOINT_URL: http://minio:9000
S3_PUBLIC_ENDPOINT_URL: ${S3_PUBLIC_ENDPOINT_URL:-http://localhost:${MINIO_API_PORT:-9000}}
S3_ACCESS_KEY: ${MINIO_ROOT_USER:-minioadmin}
S3_SECRET_KEY: ${MINIO_ROOT_PASSWORD:-minioadmin}
S3_REGION: ${S3_REGION:-us-east-1}
@@ -88,7 +89,19 @@ services:
entrypoint: >
/bin/sh -c "
mc alias set local http://minio:9000 ${MINIO_ROOT_USER:-minioadmin} ${MINIO_ROOT_PASSWORD:-minioadmin} &&
mc mb --ignore-existing local/${S3_BUCKET_NAME:-messenger-media} &&
cat > /tmp/cors-rules.json <<EOF &&
[
{
\"AllowedHeaders\": [\"*\"],
\"AllowedMethods\": [\"GET\", \"PUT\", \"POST\", \"HEAD\"],
\"AllowedOrigins\": [\"${S3_CORS_ALLOW_ORIGIN:-*}\"],
\"ExposeHeaders\": [\"ETag\"],
\"MaxAgeSeconds\": 3600
}
]
EOF
mc cors set local/${S3_BUCKET_NAME:-messenger-media} /tmp/cors-rules.json
" "
restart: "no" restart: "no"