services:
  # Dev container for the cope2n AI service. Kept alive indefinitely by the
  # no-op `tail` command so developers can attach/exec into it.
  cope2n-fi:
    image: tuanlv/cope2n-ai-fi
    container_name: 'tuanlv-cope2n-ai-fi-dev'
    build:
      context: .
      dockerfile: Dockerfile
      # Large /dev/shm during image build (heavy ML tooling).
      shm_size: 10gb
    # Runtime shared memory — matches the build-time setting; needed by
    # frameworks (e.g. PyTorch dataloaders) that use /dev/shm.
    shm_size: 10gb
    # NOTE(review): host networking + privileged grants broad host access —
    # acceptable for a trusted dev box, not for production.
    network_mode: 'host'
    privileged: true
    volumes:
      # Source trees mounted from the host for live editing.
      - /mnt/hdd4T/OCR/tuanlv/05-copen-ai/cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
      - /mnt/hdd2T/dxtan/TannedCung/OCR/cope2n-api:/workspace/cope2n-api
      - /mnt/hdd2T/dxtan/TannedCung/OCR/cope2n-fe:/workspace/cope2n-fe
    # GPU reservation — uncomment to pass one NVIDIA device through:
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
    # Keep-alive: tail follows stdin forever, so the container never exits.
    command: bash -c "tail -f > /dev/null"

  # train_component:
  #   build:
  #     context: .
  #     shm_size: 10gb
  #     args:
  #         - NODE_ENV=local
  #     dockerfile: Dockerfile
  #   shm_size: 10gb
  #   image: tannedcung/kubeflow-text-recognition
  #   container_name: "TannedCung-kubeflow-TextRecognition-Train"
  #   network_mode: "host"
  #   privileged: true
  #   depends_on:
  #     data_preparation_component:
  #       condition: service_completed_successfully
  #   volumes:
  #     # - /mnt/hdd2T/dxtan/TannedCung/VI/vi-vision-inspection-kubeflow/components/text_recognition:/workspace
  #     - /mnt/ssd500/datnt/mmocr/logs/satrn_lite_2023-04-13_fwd_finetuned:/weights/
  #     - /mnt/hdd2T/dxtan/TannedCung/OCR/TextRecognition/test_input/:/test_input/
  #     - /mnt/hdd2T/dxtan/TannedCung/OCR/TextRecognition/train_output/:/train_output/
  #     - /mnt/hdd2T/dxtan/TannedCung/Data/:/Data
  #     - /mnt/hdd2T/dxtan/TannedCung/VI/vi-vision-inspection-kubeflow/components/text_recognition/configs:/configs
  #   command: bash -c "python /workspace/tools/train.py --config=/workspace/configs/satrn_lite.py --load_from=/weights/textrecog_fwd_tuned_20230413_params.pth --gpu_id=1 --img_path_prefix=/Data --vimlops_token=123 --total_epochs=5 --batch_size=32 --work_dir=/train_output"
    # command: bash -c "tail -f > /dev/null"