Update: build method

dx-tan 2023-12-21 12:55:14 +07:00
parent 368d892619
commit c423c0516e
5 changed files with 355 additions and 33 deletions


@@ -6,7 +6,7 @@ ocr_engine = {
         "device": device
     },
     "recognizer": {
-        "version": "/workspace/cope2n-ai-fi/weights/models/sdsvtr/hub/sbt_20231210_sdsrv.pth",
+        "version": "/workspace/cope2n-ai-fi/weights/models/ocr_engine/sdsvtr/hub/sbt_20231218_e116_sdstr.pth",
         "device": device
     },
     "deskew": {
@@ -29,7 +29,7 @@ kvu_model = {
     "option": "sbt_v2",
     "model": {
         "pretrained_model_path": "/workspace/cope2n-ai-fi/weights/layoutxlm-base",
-        "config": "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231130-184433/base.yaml",
-        "checkpoint": "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231130-184433/checkpoints/best_model.pth"
+        "config": "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231219-143837/base.yaml",
+        "checkpoint": "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231219-143837/checkpoints/best_model.pth"
     }
 }
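
Note: since the hunk above only swaps weight and config paths, a quick sanity check before rebuilding the image can catch a missing file early. A minimal sketch, not part of this commit — the script name and path list are assumptions taken from the paths in this diff, and it would run inside the cope2n-ai-fi container:

# check_weights.py -- hypothetical pre-build sanity check for the paths
# referenced in the hunk above.
import os
import sys

WEIGHT_PATHS = [
    "/workspace/cope2n-ai-fi/weights/models/ocr_engine/sdsvtr/hub/sbt_20231218_e116_sdstr.pth",
    "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231219-143837/base.yaml",
    "/workspace/cope2n-ai-fi/weights/models/sdsvkvu/key_value_understanding_for_sbt-20231219-143837/checkpoints/best_model.pth",
]

missing = [p for p in WEIGHT_PATHS if not os.path.isfile(p)]
if missing:
    sys.exit(f"Missing weight files: {missing}")
print("All referenced weight files are present.")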


@@ -0,0 +1,98 @@
import subprocess
import hashlib
import random
import string
import os

import boto3

BASH_FILE = './deploy_images.sh'
S3_ENDPOINT = ""
S3_ACCESS_KEY = "AKIA3AFPFVWZD77UACHE"
S3_SECRET_KEY = "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"
S3_BUCKET = "ocr-deployment-config"


class MinioS3Client:
    def __init__(self, access_key, secret_key, bucket_name, endpoint=""):
        self.endpoint = endpoint
        self.access_key = access_key
        self.secret_key = secret_key
        self.bucket_name = bucket_name
        try:
            if len(endpoint) > 0:
                # Custom endpoint (e.g. a self-hosted MinIO deployment)
                self.s3_client = boto3.client(
                    's3',
                    endpoint_url=endpoint,
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key
                )
            else:
                # Default AWS S3 endpoint
                self.s3_client = boto3.client(
                    's3',
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key
                )
        except Exception as e:
            print(f"[WARN] Unable to create an s3 client, {e}")
            self.s3_client = None

    def update_object(self, s3_key, content):
        try:
            res = self.s3_client.put_object(
                Bucket=self.bucket_name,
                Key=s3_key,
                Body=content
            )
            # print(f"Object '{s3_key}' updated in S3 with res: {res}")
            return res
        except Exception as e:
            print(f"Error updating object in S3: {str(e)}")

    def upload_file(self, local_file_path, s3_key):
        try:
            res = self.s3_client.upload_file(local_file_path, self.bucket_name, s3_key)
            # print(f"File '{local_file_path}' uploaded to S3 with key '{s3_key}'")
            return res
        except Exception as e:
            print(f"Error uploading file to S3: {str(e)}")

    def download_file(self, s3_key, local_file_path):
        try:
            res = self.s3_client.download_file(self.bucket_name, s3_key, local_file_path)
            # print(f"File '{s3_key}' downloaded from S3 to '{local_file_path}'")
            return res
        except Exception as e:
            print(f"Error downloading file from S3: {str(e)}")


def random_hash():
    # Generate a random 16-character alphanumeric input
    random_input = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
    # Hash it to get a fixed-length hex digest
    hash_object = hashlib.sha256(random_input.encode())
    return hash_object.hexdigest()


def deploy():
    # Use the first 8 hex characters of a random hash as the image tag
    tag = str(random_hash()[:8])
    print(tag)

    # Build and tag the images, passing the tag as a command-line argument
    os.chmod(BASH_FILE, 0o755)
    subprocess.call([BASH_FILE, tag])

    # Upload the tag-stamped compose and env files to S3
    s3_client = MinioS3Client(S3_ACCESS_KEY, S3_SECRET_KEY, S3_BUCKET, S3_ENDPOINT)
    local_compose_file = f"./docker-compose_{tag}.yml"
    local_env_file = f"./.env_{tag}"  # deploy_images.sh writes .env_<tag>, with no .yml suffix
    s3_compose_file = f"docker-yaml-files/docker-compose_{tag}.yml"
    s3_env_file = f"env-files/.env_{tag}"
    print("[INFO]: Uploading compose and env file to s3...")
    s3_client.upload_file(local_compose_file, s3_compose_file)
    s3_client.upload_file(local_env_file, s3_env_file)


if __name__ == "__main__":
    deploy()
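
Note: the update_object method above is currently unused. One natural use, sketched here as an assumption rather than anything this commit does, is publishing a small pointer object so deployment hosts can discover the most recent tag without listing the bucket; the "latest_tag" key name is hypothetical:

# Hypothetical extension of deploy(): record the most recent tag in a
# well-known S3 key. Reuses the MinioS3Client defined in this file.
def publish_latest_tag(s3_client, tag):
    # The "latest_tag" key is an assumed convention, not part of this commit.
    return s3_client.update_object("latest_tag", tag.encode("utf-8"))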

deploy_images.sh Normal file → Executable file

@@ -0,0 +1,22 @@
#!/bin/bash

docker compose -f docker-compose-dev.yml build

tag=$1
echo "Tag received from Python: $tag"

echo "Tagging AI image with tag: $tag..."
docker tag sidp/cope2n-ai-fi-sbt:latest public.ecr.aws/v4n9y6r8/sidp/cope2n-ai-fi-sbt:${tag}
# docker push public.ecr.aws/v4n9y6r8/sidp/cope2n-ai-fi-sbt:${tag}

echo "Tagging BE image with tag: $tag..."
docker tag sidp/cope2n-be-fi-sbt:latest public.ecr.aws/v4n9y6r8/sidp/cope2n-be-fi-sbt:${tag}
# docker push public.ecr.aws/v4n9y6r8/sidp/cope2n-be-fi-sbt:${tag}

echo "Tagging FE image with tag: $tag..."
docker tag sidp/cope2n-fe-fi-sbt:latest public.ecr.aws/v4n9y6r8/sidp/cope2n-fe-fi-sbt:${tag}
# docker push public.ecr.aws/v4n9y6r8/sidp/cope2n-fe-fi-sbt:${tag}

# Render the tag-stamped production compose and env files for this release
cp ./docker-compose.yml ./docker-compose_${tag}.yml
sed -i "s/{{tag}}/$tag/g" ./docker-compose_${tag}.yml
cp .env .env_${tag}
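
Note: the consuming side is not part of this commit, but given the S3 layout above it presumably mirrors the upload: fetch the tag-stamped compose and env files, then bring the stack up. A sketch under those assumptions (file names follow the keys used in deploy(); the run_deployment helper is hypothetical):

# Hypothetical consumer-side script: pull a tagged deployment bundle from
# S3 and start it. Assumes the same bucket and keys that deploy() writes.
import subprocess
import boto3

def run_deployment(tag, bucket="ocr-deployment-config"):
    s3 = boto3.client("s3")
    compose_file = f"docker-compose_{tag}.yml"
    env_file = f".env_{tag}"
    s3.download_file(bucket, f"docker-yaml-files/{compose_file}", compose_file)
    s3.download_file(bucket, f"env-files/{env_file}", env_file)
    # {{tag}} was already substituted by deploy_images.sh, so compose can
    # pull the matching images straight from ECR.
    subprocess.check_call(
        ["docker", "compose", "-f", compose_file, "--env-file", env_file, "up", "-d"]
    )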

docker-compose-dev.yml Executable file

@@ -0,0 +1,219 @@
# TODO: use docker-compose extend: for compact purpose
version: '3.0'

networks:
  ctel-sbt:
    driver: bridge

services:
  cope2n-fi-sbt:
    build:
      context: cope2n-ai-fi
      shm_size: 10gb
      dockerfile: Dockerfile
    shm_size: 10gb
    restart: always
    networks:
      - ctel-sbt
    privileged: true
    image: sidp/cope2n-ai-fi-sbt
    environment:
      - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - CUDA_VISIBLE_DEVICES=0
    # volumes:
    #   - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
    working_dir: /workspace/cope2n-ai-fi
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #       - driver: nvidia
    #         count: 1
    #         capabilities: [gpu]
    # command: bash -c "tail -f > /dev/null"
    command: bash run.sh
    deploy:
      mode: replicated
      replicas: 3

  # Back-end services
  be-ctel-sbt:
    build:
      context: cope2n-api
      dockerfile: Dockerfile
    image: sidp/cope2n-be-fi-sbt
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - DB_ENGINE=${DB_ENGINE}
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PUBLIC_PORT}
      - DEBUG=${DEBUG}
      - CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS}
      - BASE_PORT=${BASE_PORT}
      - CTEL_KEY=${CTEL_KEY}
      - SECRET_KEY=${SECRET_KEY}
      - DB_INTERNAL_KEY=${DB_INTERNAL_KEY}
      - ALLOWED_HOSTS=${ALLOWED_HOSTS}
      - BROKER_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - BASE_URL=http://be-ctel-sbt:${BASE_PORT}
      - BASE_UI_URL=http://fe:${VITE_PORT}
      - AUTH_TOKEN_LIFE_TIME=${AUTH_TOKEN_LIFE_TIME}
      - IMAGE_TOKEN_LIFE_TIME=${IMAGE_TOKEN_LIFE_TIME}
      - INTERNAL_SDS_KEY=${INTERNAL_SDS_KEY}
      - FI_USER_NAME=${FI_USER_NAME}
      - FI_PASSWORD=${FI_PASSWORD}
      - S3_ENDPOINT=${S3_ENDPOINT}
      - S3_ACCESS_KEY=${S3_ACCESS_KEY}
      - S3_SECRET_KEY=${S3_SECRET_KEY}
      - S3_BUCKET_NAME=${S3_BUCKET_NAME}
    restart: always
    networks:
      - ctel-sbt
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./data/BE_static:/app/static
      - ./cope2n-api:/app
    working_dir: /app
    depends_on:
      db-sbt:
        condition: service_started
    command: sh -c "chmod -R 777 /app/static; sleep 5; python manage.py collectstatic --no-input &&
      python manage.py migrate &&
      python manage.py compilemessages &&
      gunicorn fwd.asgi:application -k uvicorn.workers.UvicornWorker --timeout 300 -b 0.0.0.0:9000" # pre-makemigrations on prod

  minio:
    image: minio/minio
    environment:
      - MINIO_ROOT_USER=${S3_ACCESS_KEY}
      - MINIO_ROOT_PASSWORD=${S3_SECRET_KEY}
      - MINIO_ACCESS_KEY=${S3_ACCESS_KEY}
      - MINIO_SECRET_KEY=${S3_SECRET_KEY}
    volumes:
      - ./data/minio_data:/data
    networks:
      - ctel-sbt
    restart: always
    command: server --address :9884 --console-address :9885 /data
    profiles: ["local"]

  createbuckets:
    image: minio/mc
    depends_on:
      - minio
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc alias set myminio http://minio:9884 ${S3_ACCESS_KEY} ${S3_SECRET_KEY};
      /usr/bin/mc mb myminio/${S3_BUCKET_NAME};
      /usr/bin/mc policy set public myminio/${S3_BUCKET_NAME};
      exit 0;
      "
    networks:
      - ctel-sbt
    profiles: ["local"]

  result-cache:
    image: redis:6.2-alpine
    restart: always
    command: redis-server --save 20 1 --loglevel warning
    networks:
      - ctel-sbt

  be-celery-sbt:
    build:
      context: cope2n-api
      dockerfile: Dockerfile
    image: sidp/cope2n-be-fi-sbt
    environment:
      - MEDIA_ROOT=${MEDIA_ROOT}
      - PYTHONPATH=${PYTHONPATH}:/app # For import module
      - PYTHONUNBUFFERED=1 # For show print log
      - DB_ENGINE=${DB_ENGINE}
      - DB_SCHEMA=${DB_SCHEMA}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PUBLIC_PORT}
      - BROKER_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
      - BASE_UI_URL=http://fe:${VITE_PORT}
      - DEBUG=${DEBUG}
      - DB_INTERNAL_KEY=${DB_INTERNAL_KEY}
      - IMAGE_TOKEN_LIFE_TIME=${IMAGE_TOKEN_LIFE_TIME}
      - CTEL_KEY=${CTEL_KEY}
      - SECRET_KEY=${SECRET_KEY}
      - ALLOWED_HOSTS=${ALLOWED_HOSTS}
      - S3_ENDPOINT=${S3_ENDPOINT}
      - S3_ACCESS_KEY=${S3_ACCESS_KEY}
      - S3_SECRET_KEY=${S3_SECRET_KEY}
      - S3_BUCKET_NAME=${S3_BUCKET_NAME}
      - BASE_URL=http://be-ctel-sbt:${BASE_PORT}
      - REDIS_HOST=result-cache
      - REDIS_PORT=6379
    restart: always
    networks:
      - ctel-sbt
    depends_on:
      db-sbt:
        condition: service_started
      rabbitmq-sbt:
        condition: service_started
    volumes:
      - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
      - ./cope2n-api:/app
    working_dir: /app
    command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 5"

  # Back-end persistent
  db-sbt:
    restart: always
    mem_reservation: 500m
    image: postgres:14.7-alpine
    volumes:
      - ./data/postgres_data:/var/lib/postgresql/data
    networks:
      - ctel-sbt
    environment:
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASSWORD}
      - POSTGRES_DB=${DB_SCHEMA}

  rabbitmq-sbt:
    mem_reservation: 600m
    restart: always
    image: rabbitmq:3.10-alpine
    working_dir: /workspace/cope2n-api
    networks:
      - ctel-sbt
    environment:
      - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}

  # Front-end services
  fe-sbt:
    restart: always
    build:
      context: cope2n-fe
      shm_size: 10gb
      dockerfile: Dockerfile
    image: sidp/cope2n-fe-fi-sbt
    shm_size: 10gb
    privileged: true
    ports:
      - 80:80
    depends_on:
      be-ctel-sbt:
        condition: service_started
      be-celery-sbt:
        condition: service_started
    environment:
      - VITE_PROXY=http://be-ctel-sbt:${BASE_PORT}
      - VITE_API_BASE_URL=http://fe-sbt:80
    volumes:
      - ./data/BE_static:/backend-static
    networks:
      - ctel-sbt

volumes:
  db_data:
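
Note: deploy_images.sh retags the sidp/cope2n-*-sbt:latest images that this dev compose file builds. A small guard that the build actually produced all three can be useful before tagging and pushing; a sketch, not part of this commit:

# Hypothetical guard: verify the images built from docker-compose-dev.yml
# exist locally before deploy_images.sh retags them for ECR.
import subprocess
import sys

IMAGES = [
    "sidp/cope2n-ai-fi-sbt:latest",
    "sidp/cope2n-be-fi-sbt:latest",
    "sidp/cope2n-fe-fi-sbt:latest",
]

for image in IMAGES:
    # `docker images -q` prints the image ID, or nothing if it is absent
    out = subprocess.run(
        ["docker", "images", "-q", image], capture_output=True, text=True
    ).stdout.strip()
    if not out:
        sys.exit(f"Image not found locally: {image}")
print("All images present; safe to tag and push.")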


@@ -7,38 +7,22 @@ networks:
 services:
   cope2n-fi-sbt:
-    build:
-      context: cope2n-ai-fi
-      shm_size: 10gb
-      dockerfile: Dockerfile
     shm_size: 10gb
+    mem_limit: 10gb
     restart: always
+    image: public.ecr.aws/v4n9y6r8/sidp/cope2n-ai-fi-sbt:{{tag}}
     networks:
       - ctel-sbt
     privileged: true
     environment:
       - CELERY_BROKER=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@rabbitmq-sbt:5672
-      - CUDA_VISIBLE_DEVICES=0
-    volumes:
-      - ./cope2n-ai-fi:/workspace/cope2n-ai-fi # for dev container only
     working_dir: /workspace/cope2n-ai-fi
-    # deploy:
-    #   resources:
-    #     reservations:
-    #       devices:
-    #       - driver: nvidia
-    #         count: 1
-    #         capabilities: [gpu]
-    # command: bash -c "tail -f > /dev/null"
     command: bash run.sh
     deploy:
       mode: replicated
-      replicas: 3
+      replicas: 2
   # Back-end services
   be-ctel-sbt:
-    build:
-      context: cope2n-api
-      dockerfile: Dockerfile
     environment:
       - MEDIA_ROOT=${MEDIA_ROOT}
       - DB_ENGINE=${DB_ENGINE}
@@ -67,12 +51,13 @@ services:
       - S3_SECRET_KEY=${S3_SECRET_KEY}
       - S3_BUCKET_NAME=${S3_BUCKET_NAME}
     restart: always
+    mem_limit: 10gb
+    image: public.ecr.aws/v4n9y6r8/sidp/cope2n-be-fi-sbt:{{tag}}
     networks:
       - ctel-sbt
     volumes:
       - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
-      - ./data/BE_static:/app/static
-      - ./cope2n-api:/app
+      - BE_static:/app/static
     working_dir: /app
     depends_on:
       db-sbt:
@@ -114,14 +99,12 @@ services:
   result-cache:
     image: redis:6.2-alpine
     restart: always
+    mem_limit: 10gb
     command: redis-server --save 20 1 --loglevel warning
     networks:
       - ctel-sbt
   be-celery-sbt:
-    build:
-      context: cope2n-api
-      dockerfile: Dockerfile
     environment:
       - MEDIA_ROOT=${MEDIA_ROOT}
       - PYTHONPATH=${PYTHONPATH}:/app # For import module
@@ -148,6 +131,7 @@ services:
       - REDIS_HOST=result-cache
       - REDIS_PORT=6379
     restart: always
+    image: public.ecr.aws/v4n9y6r8/sidp/cope2n-be-fi-sbt:{{tag}}
     networks:
       - ctel-sbt
     depends_on:
@@ -157,7 +141,6 @@ services:
         condition: service_started
     volumes:
       - ${HOST_MEDIA_FOLDER}:${MEDIA_ROOT}
-      - ./cope2n-api:/app
     working_dir: /app
     command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 5"
@@ -175,6 +158,7 @@ services:
       - POSTGRES_USER=${DB_USER}
       - POSTGRES_PASSWORD=${DB_PASSWORD}
       - POSTGRES_DB=${DB_SCHEMA}
+    profiles: ["local"]
   rabbitmq-sbt:
     mem_reservation: 600m
@@ -190,11 +174,9 @@ services:
   # Front-end services
   fe-sbt:
     restart: always
-    build:
-      context: cope2n-fe
-      shm_size: 10gb
-      dockerfile: Dockerfile
+    mem_limit: 4gb
     shm_size: 10gb
+    image: public.ecr.aws/v4n9y6r8/sidp/cope2n-fe-fi-sbt:{{tag}}
     privileged: true
     ports:
       - 80:80
@@ -207,9 +189,10 @@ services:
       - VITE_PROXY=http://be-ctel-sbt:${BASE_PORT}
       - VITE_API_BASE_URL=http://fe-sbt:80
     volumes:
-      - ./data/BE_static:/backend-static
+      - BE_static:/backend-static
     networks:
       - ctel-sbt
 volumes:
   db_data:
+  BE_static:
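
Note: because the production file relies on sed replacing every {{tag}} placeholder, a cheap post-render check avoids shipping a compose file with a leftover template. Again a sketch, not part of the commit; the example file name uses a made-up tag:

# Hypothetical post-render check: fail if any {{tag}} placeholder survived
# the sed substitution in deploy_images.sh.
import sys

def assert_rendered(path):
    with open(path) as f:
        if "{{tag}}" in f.read():
            sys.exit(f"Unrendered template placeholder left in {path}")

assert_rendered("docker-compose_abc12345.yml")  # example tag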