version: '3.0'
# TODO: use docker-compose "extends" or YAML anchors to cut down the config repeated across services (sketch below)
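# A possible shape for that cleanup (a sketch only, assuming Compose-spec tooling that
# supports top-level x-* extension fields and YAML merge keys): declare the shared dev
# mounts once and merge them into each service; services that need extra mounts
# (models, media) would still spell out their own list.
#
# x-dev-volumes: &dev-volumes
#   volumes:
#     - ./cope2n-ai-fi:/workspace/cope2n-ai-fi
#     - ./cope2n-api:/workspace/cope2n-api
#     - ./cope2n-fe:/workspace/cope2n-fe
#
# services:
#   be-ctel-manulife-sbt:
#     <<: *dev-volumes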
networks:
  ctel:
    driver: bridge

services:
  cope2n-fi-manulife-sbt:
    build:
      context: cope2n-ai-fi
      shm_size: 10gb
      dockerfile: Dockerfile
    shm_size: 10gb
    image: tannedcung/cope2n-ai-fi-sbt
    container_name: "TannedCung-cope2n-ai-fi-manulife-sbt-dev"
    networks:
      - ctel
    privileged: true
    environment:
      - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-manulife-sbt:5672
      - CUDA_VISIBLE_DEVICES=1
    volumes:
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
      - ./cope2n-ai-fi/models:/models
    working_dir: /workspace/cope2n-ai-fi
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    # command: bash -c "tail -f > /dev/null"
    command: bash run.sh
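    # While iterating on this worker, its output can be followed by service name
    # (assuming the Docker Compose V2 CLI):
    #   docker compose logs -f cope2n-fi-manulife-sbt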
  # Back-end services
  be-ctel-manulife-sbt:
    build:
      context: cope2n-api
      dockerfile: Dockerfile-dev
    # ports:
    #   - 9800:9000
    image: tannedcung/cope2n-be
    container_name: "TannedCung-cope2n-be-ctel-manulife-sbt-dev"
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - DB_ENGINE=${DB_ENGINE}
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PUBLIC_PORT}
      - DEBUG=${DEBUG}
      - CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS}
      - BASE_PORT=${BASE_PORT}
      - CTEL_KEY=${CTEL_KEY}
      - SECRET_KEY=${SECRET_KEY}
      - ALLOWED_HOSTS=${ALLOWED_HOSTS}
      - BROKER_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-manulife-sbt:5672
      - BASE_URL=http://be-ctel-manulife-sbt:${BASE_PORT}
      - BASE_UI_URL=http://fe:${VITE_PORT}
      - AUTH_TOKEN_LIFE_TIME=${AUTH_TOKEN_LIFE_TIME}
      - IMAGE_TOKEN_LIFE_TIME=${IMAGE_TOKEN_LIFE_TIME}
      - INTERNAL_SDS_KEY=${INTERNAL_SDS_KEY}
      - FI_USER_NAME=${FI_USER_NAME}
      - FI_PASSWORD=${FI_PASSWORD}
    # restart: always
    networks:
      - ctel
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
    working_dir: /workspace/cope2n-api
    # depends_on:
    #   db-manulife-sbt:
    #     condition: service_started
    #   rabbitmq-manulife-sbt:
    #     condition: service_started
    # command: sh -c "python manage.py collectstatic --no-input && 
    #               python manage.py makemigrations &&
    #               python manage.py compilemessages &&
    command: "gunicorn fwd.asgi:application -k uvicorn.workers.UvicornWorker -b 0.0.0.0:9000" # pre-makemigrations on prod
    # command: sh -c "tail -f > /dev/null"
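    # Since the command above skips makemigrations/migrate, in dev they are typically
    # run inside the container (a sketch, assuming the Compose V2 CLI and that
    # manage.py lives in the working_dir):
    #   docker compose exec be-ctel-manulife-sbt python manage.py migrate
    #   docker compose exec be-ctel-manulife-sbt python manage.py collectstatic --no-input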

  be-celery-manulife-sbt:
    # build:
    #   context: cope2n-api
    #   dockerfile: Dockerfile-dev
    #   args:
    #     - "UID=${UID:-1000}"
    #     - "GID=${GID:-1000}"
    image: tannedcung/cope2n-be
    container_name: "TannedCung-cope2n-be-celery-manulife-sbt-dev"
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - PYTHONPATH=${PYTHONPATH}:/app  # so modules under /app can be imported
      - PYTHONUNBUFFERED=1  # flush print output straight to the container logs
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_INTERNAL_PORT}
      - BROKER_URL=${BROKER_URL}
      - DB_ENGINE=${DB_ENGINE}
      - DEBUG=${DEBUG}
    networks:
      - ctel
    # restart: always
    depends_on:
      db-manulife-sbt:
        condition: service_started
      rabbitmq-manulife-sbt:
        condition: service_started
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
    working_dir: /workspace/cope2n-api
    command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO"
  # Back-end persistence services
  db-manulife-sbt:
    mem_reservation: 500m
    mem_limit: 1g
    container_name: TannedCung-cope2n-be-manulife-sbt-db
    image: postgres:14.7-alpine
    volumes:
      - db_data:/var/lib/postgresql/data
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
    working_dir: /workspace/cope2n-api
    networks:
      - ctel
    environment:
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASSWORD}
      - POSTGRES_DB=${DB_SCHEMA}
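    # Example interactive session against this database (assuming the Compose V2 CLI
    # and the same .env values exported in the calling shell):
    #   docker compose exec db-manulife-sbt psql -U ${DB_USER} -d ${DB_SCHEMA}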

  rabbitmq-manulife-sbt:
    mem_reservation: 600m
    mem_limit: 4g
    container_name: TannedCung-cope2n-be-rabbitmq-manulife-sbt
    restart: always
    image: rabbitmq:3.10-alpine
    ports:
      - 5672:5672
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
    working_dir: /workspace/cope2n-api
    networks:
      - ctel
    environment:
      - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}
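    # Broker state can be inspected from inside the container, e.g.:
    #   docker compose exec rabbitmq-manulife-sbt rabbitmqctl list_queues
    # Switching to rabbitmq:3.10-management-alpine would additionally expose the web UI
    # on port 15672, if that is wanted for dev.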
  # Front-end services
  fe:
    # build:
    #   args:
    #     - PORT=${PORT}
    #   context: cope2n-fe
    #   shm_size: 10gb
    #   dockerfile: Dockerfile-dev
    shm_size: 10gb
    image: tannedcung/cope2n-be
    container_name: "TannedCung-cope2n-fe-ctel-manulife-sbt-dev"
    privileged: true
    ports:
      - 9801:9001
    networks:
      - ctel
    volumes:
      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - ./cope2n-api:/workspace/cope2n-api
      - ./cope2n-fe:/workspace/cope2n-fe
    working_dir: /workspace/cope2n-fe
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    command: bash -c "source /root/.bashrc && ldconfig && npm start"
    # command: sh -c "tail -f > /dev/null"
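    # With the 9801:9001 mapping above, whatever "npm start" serves on container port
    # 9001 is reachable from the host at http://localhost:9801.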

volumes:
  db_data:
  rabbitmq_data:
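
# Typical dev bring-up (a sketch, assuming a populated .env next to this file and the
# Docker Compose V2 CLI, which reads it automatically):
#   docker compose up -d --build
#   docker compose logs -f be-ctel-manulife-sbt
#   docker compose down        # add -v to also drop db_data / rabbitmq_data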