# TODO: use docker-compose extends to keep this file compact
version: '3.0'

networks:
  ctel-sbt:
    driver: bridge

services:
  cope2n-fi-sbt:
    build:
      context: cope2n-ai-fi
      shm_size: 10gb
      dockerfile: Dockerfile
    shm_size: 10gb
    restart: always
    networks:
      - ctel-sbt
    privileged: true
    environment:
      - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - CUDA_VISIBLE_DEVICES=0
    volumes:
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
    working_dir: /workspace/cope2n-ai-fi
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    # command: bash -c "tail -f > /dev/null"
    command: bash run.sh
    deploy:
      mode: replicated
      replicas: 1

  cope2n-fi-sbt-2:
    build:
      context: cope2n-ai-fi
      shm_size: 10gb
      dockerfile: Dockerfile
    shm_size: 10gb
    restart: always
    networks:
      - ctel-sbt
    privileged: true
    environment:
      - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - CUDA_VISIBLE_DEVICES=0
    volumes:
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
    working_dir: /workspace/cope2n-ai-fi
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    # command: bash -c "tail -f > /dev/null"
    command: bash run.sh
    deploy:
      mode: replicated
      replicas: 1

  # Back-end services
  be-ctel-sbt:
    build:
      context: cope2n-api
      dockerfile: Dockerfile
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - DB_ENGINE=${DB_ENGINE}
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PUBLIC_PORT}
      - DEBUG=${DEBUG}
      - CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS}
      - BASE_PORT=${BASE_PORT}
      - CTEL_KEY=${CTEL_KEY}
      - SECRET_KEY=${SECRET_KEY}
      - DB_INTERNAL_KEY=${DB_INTERNAL_KEY}
      - ALLOWED_HOSTS=${ALLOWED_HOSTS}
      - BROKER_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - BASE_URL=http://be-ctel-sbt:${BASE_PORT}
      - BASE_UI_URL=http://fe:${VITE_PORT}
      - AUTH_TOKEN_LIFE_TIME=${AUTH_TOKEN_LIFE_TIME}
      - IMAGE_TOKEN_LIFE_TIME=${IMAGE_TOKEN_LIFE_TIME}
      - INTERNAL_SDS_KEY=${INTERNAL_SDS_KEY}
      - FI_USER_NAME=${FI_USER_NAME}
      - FI_PASSWORD=${FI_PASSWORD}
      - S3_ENDPOINT=${S3_ENDPOINT}
      - S3_ACCESS_KEY=${S3_ACCESS_KEY}
      - S3_SECRET_KEY=${S3_SECRET_KEY}
      - S3_BUCKET_NAME=${S3_BUCKET_NAME}
    restart: always
    networks:
      - ctel-sbt
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./data/static:/app/static
      - ./cope2n-api:/app
    working_dir: /app
    depends_on:
      db-sbt:
        condition: service_started
    command: sh -c "chmod -R 777 /app/static; sleep 5; python manage.py collectstatic --no-input && python manage.py migrate && python manage.py compilemessages && gunicorn fwd.asgi:application -k uvicorn.workers.UvicornWorker --timeout 300 -b 0.0.0.0:9000" # pre-makemigrations on prod

  minio:
    image: minio/minio
    environment:
      - MINIO_ROOT_USER=${S3_ACCESS_KEY}
      - MINIO_ROOT_PASSWORD=${S3_SECRET_KEY}
      - MINIO_ACCESS_KEY=${S3_ACCESS_KEY}
      - MINIO_SECRET_KEY=${S3_SECRET_KEY}
    volumes:
      - ./data/minio_data:/data
    networks:
      - ctel-sbt
    restart: always
    command: server --address :9884 --console-address :9885 /data
    profiles: ["local"]

  createbuckets:
    image: minio/mc
    depends_on:
      - minio
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc alias set myminio http://minio:9884 ${S3_ACCESS_KEY} ${S3_SECRET_KEY};
      /usr/bin/mc mb myminio/${S3_BUCKET_NAME};
      /usr/bin/mc policy set public myminio/${S3_BUCKET_NAME};
      exit 0;
      "
    networks:
      - ctel-sbt
    profiles: ["local"]

  result-cache:
    image: redis:6.2-alpine
    restart: always
    command: redis-server --save 20 1 --loglevel warning
    networks:
      - ctel-sbt

  be-celery-sbt:
    build:
      context: cope2n-api
      dockerfile: Dockerfile
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - PYTHONPATH=${PYTHONPATH}:/app # for module imports
      - PYTHONUNBUFFERED=1 # unbuffered output so print logs appear
      - DB_ENGINE=${DB_ENGINE}
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PUBLIC_PORT}
      - BROKER_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - BASE_UI_URL=http://fe:${VITE_PORT}
      - DEBUG=${DEBUG}
      - DB_INTERNAL_KEY=${DB_INTERNAL_KEY}
      - IMAGE_TOKEN_LIFE_TIME=${IMAGE_TOKEN_LIFE_TIME}
      - CTEL_KEY=${CTEL_KEY}
      - SECRET_KEY=${SECRET_KEY}
      - ALLOWED_HOSTS=${ALLOWED_HOSTS}
      - S3_ENDPOINT=${S3_ENDPOINT}
      - S3_ACCESS_KEY=${S3_ACCESS_KEY}
      - S3_SECRET_KEY=${S3_SECRET_KEY}
      - S3_BUCKET_NAME=${S3_BUCKET_NAME}
      - BASE_URL=http://be-ctel-sbt:${BASE_PORT}
      - REDIS_HOST=result-cache
      - REDIS_PORT=6379
    restart: always
    networks:
      - ctel-sbt
    depends_on:
      db-sbt:
        condition: service_started
      rabbitmq-sbt:
        condition: service_started
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./cope2n-api:/app
    working_dir: /app
    command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 8"

  # Back-end persistence
  db-sbt:
    restart: always
    mem_reservation: 500m
    image: postgres:14.7-alpine
    volumes:
      - ./data/postgres_data:/var/lib/postgresql/data
    networks:
      - ctel-sbt
    environment:
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASSWORD}
      - POSTGRES_DB=${DB_SCHEMA}

  rabbitmq-sbt:
    mem_reservation: 600m
    restart: always
    image: rabbitmq:3.10-alpine
    working_dir: /workspace/cope2n-api
    networks:
      - ctel-sbt
    environment:
      - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}

  # Front-end services
  fe-sbt:
    restart: always
    build:
      context: cope2n-fe
      shm_size: 10gb
      dockerfile: Dockerfile
    shm_size: 10gb
    privileged: true
    ports:
      - ${SIDP_SERVICE_PORT:-9881}:80
    depends_on:
      be-ctel-sbt:
        condition: service_started
      be-celery-sbt:
        condition: service_started
    environment:
      - VITE_PROXY=http://be-ctel-sbt:${BASE_PORT}
      - VITE_API_BASE_URL=http://fe-sbt:80
    volumes:
      - ./data/static:/backend-static
    networks:
      - ctel-sbt

volumes:
  db_data:
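
# A possible way to address the TODO at the top of this file: cope2n-fi-sbt and
# cope2n-fi-sbt-2 are configured identically, so a shared YAML anchor (or a
# docker-compose `extends` block pointing at a common base file) could remove the
# duplication. The sketch below is illustrative only and is left commented out;
# the `x-ai-worker` extension name is an arbitrary choice, and it assumes the two
# AI worker services are meant to stay identical.
#
# x-ai-worker: &ai-worker
#   build:
#     context: cope2n-ai-fi
#     shm_size: 10gb
#     dockerfile: Dockerfile
#   shm_size: 10gb
#   restart: always
#   networks:
#     - ctel-sbt
#   privileged: true
#   environment:
#     - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
#     - CUDA_VISIBLE_DEVICES=0
#   volumes:
#     - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
#   working_dir: /workspace/cope2n-ai-fi
#   command: bash run.sh
#   deploy:
#     mode: replicated
#     replicas: 1
#
# services:
#   cope2n-fi-sbt:
#     <<: *ai-worker
#   cope2n-fi-sbt-2:
#     <<: *ai-worker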