# immich/docker/docker-compose.prod.yml
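# Production Docker Compose stack for Immich: the API server, the background
# microservices worker, the machine-learning service, Redis, and PostgreSQL
# (pgvecto.rs). Interpolated variables such as ${UPLOAD_LOCATION} and the DB_*
# values are read from the .env file referenced below. Assuming the command is
# run next to that .env file, a typical invocation might look like:
#   docker compose -f docker-compose.prod.yml up -d --build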

version: "3.8"

name: immich-prod

x-server-build: &server-common
  image: immich-server:latest
  build:
    context: ../
    dockerfile: server/Dockerfile
  volumes:
    - ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
    - /etc/localtime:/etc/localtime:ro
  env_file:
    - .env
  restart: always
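# The &server-common anchor above is merged into both immich-server and
# immich-microservices via `<<: *server-common`, so they share the same image,
# build settings, upload volume, env file, and restart policy.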
services:
  immich-server:
    container_name: immich_server
    command: [ "./start-server.sh" ]
    <<: *server-common
    ports:
      - 2283:3001
    depends_on:
      - redis
      - database
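  # Background job worker (thumbnail generation, metadata extraction, video
  # transcoding, and similar tasks); uncomment the `extends` block below to
  # enable hardware-accelerated transcoding.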
  immich-microservices:
    container_name: immich_microservices
    command: [ "./start-microservices.sh" ]
    <<: *server-common
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    depends_on:
      - redis
      - database
      - immich-server
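  # Machine-learning inference (smart search and facial recognition). The DEVICE
  # build arg and the commented `extends` block select a hardware accelerator;
  # both default to CPU-only.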
  immich-machine-learning:
    container_name: immich_machine_learning
    image: immich-machine-learning:latest
    # extends:
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
    build:
      context: ../machine-learning
      dockerfile: Dockerfile
      args:
        - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always
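  # Redis backs the job queue shared by the server and microservices.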
  redis:
    container_name: immich_redis
    image: redis:6.2-alpine@sha256:60727c1c07f014f3ee3f90f4326792c39dcea8b3fd118be467f0b5e2b75a455d
    restart: always
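  # PostgreSQL with the pgvecto.rs extension (vector similarity search for ML
  # embeddings); credentials come from the .env file.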
  database:
    container_name: immich_postgres
    image: tensorchord/pgvecto-rs:pg14-v0.1.11@sha256:0335a1a22f8c5dd1b697f14f079934f5152eaaa216c09b61e293be285491f8ee
    env_file:
      - .env
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
    volumes:
      - ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
    restart: always
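# Named volume so downloaded ML models persist across container recreation.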
volumes:
  model-cache:
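# Illustrative .env alongside this file (variable names come from the
# interpolations above; the values are placeholders, not authoritative defaults):
#   UPLOAD_LOCATION=./library
#   DB_USERNAME=postgres
#   DB_PASSWORD=postgres
#   DB_DATABASE_NAME=immich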