diff --git a/api-cronjob/Dockerfile b/api-cronjob/Dockerfile
new file mode 100644
index 0000000..606072c
--- /dev/null
+++ b/api-cronjob/Dockerfile
@@ -0,0 +1,9 @@
+FROM python:3.9-slim
+
+WORKDIR /app
+
+COPY script.py .
+
+RUN apt-get update && apt-get -y install curl
+
+CMD [ "python", "script.py" ]
\ No newline at end of file
diff --git a/cope2n-api/fwd/settings.py b/cope2n-api/fwd/settings.py
index a4f7673..dd5801c 100755
--- a/cope2n-api/fwd/settings.py
+++ b/cope2n-api/fwd/settings.py
@@ -143,8 +143,8 @@ LANGUAGE_CODE = "en-us"
 USE_I18N = True
 
 CELERY_ENABLE_UTC = False
-CELERY_TIMEZONE = "Asia/Ho_Chi_Minh"
-TIME_ZONE = "Asia/Ho_Chi_Minh"
+CELERY_TIMEZONE = "Asia/Singapore"
+TIME_ZONE = "Asia/Singapore"
 USE_TZ = True
 
 # Static files (CSS, JavaScript, Images)
@@ -221,7 +221,18 @@ MAX_NUMBER_OF_TEMPLATE = 3
 MAX_PAGES_OF_PDF_FILE = 50
 
 OVERVIEW_REFRESH_INTERVAL = 2
-OVERVIEW_REPORT_KEY = "overview"
+OVERVIEW_REPORT_ROOT = "overview"
+OVERVIEW_REPORT_DURATION = ["30d", "7d"]
+
+SUBS = {
+    "SEAU": "AU",
+    "SESP": "SG",
+    "SME": "MY",
+    "SEPCO": "PH",
+    "TSE": "TH",
+    "SEIN": "ID",
+    "ALL": "all"
+    }
 
 CACHES = {
     'default': {
diff --git a/cope2n-api/fwd_api/api/accuracy_view.py b/cope2n-api/fwd_api/api/accuracy_view.py
index dae1a7b..d7e47eb 100644
--- a/cope2n-api/fwd_api/api/accuracy_view.py
+++ b/cope2n-api/fwd_api/api/accuracy_view.py
@@ -15,8 +15,11 @@ from ..exception.exceptions import InvalidException, RequiredFieldException, Not
 from ..models import SubscriptionRequest, Report, ReportFile
 from ..utils.accuracy import shadow_report, MonthReportAccumulate, first_of_list, extract_report_detail_list, IterAvg
 from ..utils.file import download_from_S3, convert_date_string
+from ..utils.redis import RedisUtils
 from ..utils.process import string_to_boolean
-from ..celery_worker.client_connector import c_connector
+from ..utils.subsidiary import map_subsidiary_long_to_short, map_subsidiary_short_to_long
+
+redis_client = RedisUtils()
 
 class AccuracyViewSet(viewsets.ViewSet):
     lookup_field = "username"
@@ -226,6 +229,12 @@ class AccuracyViewSet(viewsets.ViewSet):
                 description='Subsidiary',
                 type=OpenApiTypes.STR,
             ),
+            OpenApiParameter(
+                name='report_overview_duration',
+                location=OpenApiParameter.QUERY,
+                description=f'one of {settings.OVERVIEW_REPORT_DURATION}',
+                type=OpenApiTypes.STR,
+            ),
         ],
         responses=None, tags=['Accuracy']
     )
@@ -240,12 +249,26 @@ class AccuracyViewSet(viewsets.ViewSet):
         include_test = string_to_boolean(request.GET.get('include_test', "false"))
         subsidiary = request.GET.get("subsidiary", "all")
         is_daily_report = string_to_boolean(request.GET.get('is_daily_report', "false"))
-
-        try:
-            start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%dT%H:%M:%S%z')
-            end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%dT%H:%M:%S%z')
-        except ValueError:
-            raise InvalidException(excArgs="Date format")
+        report_overview_duration = request.GET.get("report_overview_duration", "")
+        subsidiary = map_subsidiary_long_to_short(subsidiary)
+
+        if is_daily_report:
+            if report_overview_duration not in settings.OVERVIEW_REPORT_DURATION:
+                raise InvalidException(excArgs="overview duration")
+            end_date = timezone.now()
+            if report_overview_duration == "30d":
+                start_date = end_date - timezone.timedelta(days=30)
+            else:
+                start_date = end_date - timezone.timedelta(days=7)
+            start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
+            start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z')
+            end_date_str = end_date.strftime('%Y-%m-%dT%H:%M:%S%z')
+        else:
+            try:
+                start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%dT%H:%M:%S%z')
+                end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%dT%H:%M:%S%z')
+            except ValueError:
+                raise InvalidException(excArgs="Date format")
 
         query_set = {"start_date_str": start_date_str,
                      "end_date_str": end_date_str,
@@ -255,6 +278,7 @@ class AccuracyViewSet(viewsets.ViewSet):
                      "include_test": include_test,
                      "subsidiary": subsidiary,
                      "is_daily_report": is_daily_report,
+                     "report_overview_duration": report_overview_duration
                      }
         # if is_daily_report:
         #     if (end_date-start_date) > timezone.timedelta(days=1):
@@ -319,7 +343,7 @@ class AccuracyViewSet(viewsets.ViewSet):
 
         response = {
             'report_detail': data,
-            'metadata': {"subsidiary": report.subsidiary,
+            'metadata': {"subsidiary": map_subsidiary_short_to_long(report.subsidiary),
                          "start_at": report.start_at,
                          "end_at": report.end_at},
             'page': {
@@ -396,20 +420,30 @@ class AccuracyViewSet(viewsets.ViewSet):
 
         paginator = Paginator(reports, page_size)
         page = paginator.get_page(page_number)
+
         data = []
         for report in page:
+            acc_keys = ["purchase_date", "retailername", "imei_number", "avg"]
+            acc = {}
+            for key in acc_keys:
+                fb = report.feedback_accuracy.get(key, 0) if report.feedback_accuracy else 0
+                rv = report.reviewed_accuracy.get(key, 0) if report.reviewed_accuracy else 0
+                acc[key] = max([fb, rv])
             data.append({
                 "ID": report.id,
                 "Created Date": report.created_at,
+                "Start Date": report.start_at,
+                "End Date": report.end_at,
                 "No. Requests": report.number_request,
                 "Status": report.status,
-                "Purchase Date Acc": report.reviewed_accuracy.get("purchase_date", None) if report.reviewed_accuracy else None,
-                "Retailer Acc": report.feedback_accuracy.get("retailername", None) if report.reviewed_accuracy else None,
-                "IMEI Acc": report.feedback_accuracy.get("imei_number", None) if report.reviewed_accuracy else None,
-                "Avg. Accuracy": report.feedback_accuracy.get("avg", None) if report.reviewed_accuracy else None,
+                "Purchase Date Acc": acc["purchase_date"],
+                "Retailer Acc": acc["retailername"],
+                "IMEI Acc": acc["imei_number"],
+                "Avg. Accuracy": acc["avg"],
                 "Avg. Client Request Time": report.average_client_time.get("avg", 0) if report.average_client_time else 0,
OCR Processing Time": report.average_OCR_time.get("avg", 0) if report.average_OCR_time else 0, "report_id": report.report_id, + "Subsidiary": map_subsidiary_short_to_long(report.subsidiary), }) response = { @@ -427,104 +461,79 @@ class AccuracyViewSet(viewsets.ViewSet): @extend_schema( parameters=[ OpenApiParameter( - name='start_date', + name='duration', location=OpenApiParameter.QUERY, - description='Start date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2023-01-02T00:00:00+0700', - ), - OpenApiParameter( - name='end_date', - location=OpenApiParameter.QUERY, - description='End date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2024-01-10T00:00:00+0700', + description='one of [30d, 7d]', + type=OpenApiTypes.STR, + default='30d', ), OpenApiParameter( name='subsidiary', location=OpenApiParameter.QUERY, description='Subsidiary', type=OpenApiTypes.STR, - ), - OpenApiParameter( - name='page', - location=OpenApiParameter.QUERY, - description='Page number', - type=OpenApiTypes.INT, - required=False - ), - OpenApiParameter( - name='page_size', - location=OpenApiParameter.QUERY, - description='Number of items per page', - type=OpenApiTypes.INT, - required=False - ), + ) ], responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="overview", methods=["GET"]) def overview(self, request): if request.method == 'GET': - subsidiary = request.GET.get('subsidiary', None) - start_date_str = request.GET.get('start_date', "") - end_date_str = request.GET.get('end_date', "") - page_number = int(request.GET.get('page', 1)) - page_size = int(request.GET.get('page_size', 10)) + subsidiary = request.GET.get('subsidiary', "ALL") + duration = request.GET.get('duration', "") - base_query = Q() - - if start_date_str and end_date_str: - try: - start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%dT%H:%M:%S%z') - end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%dT%H:%M:%S%z') - except ValueError: - raise InvalidException(excArgs="Date format") - else: - end_date = timezone.datetime.now() - start_date = end_date - timezone.timedelta(days=30) - base_query &= Q(created_at__range=(start_date, end_date)) - - if subsidiary: - base_query &= Q(subsidiary=subsidiary) - base_query &= Q(is_daily_report=True) - reports = Report.objects.filter(base_query).order_by('start_at').reverse() - - paginator = Paginator(reports, page_size) - page = paginator.get_page(page_number) - - data = [] - this_month_report = MonthReportAccumulate() - for report in page: - res = this_month_report.add(report) - if not(res): - _, _data, total = this_month_report() - data += [total] - data += _data - this_month_report = MonthReportAccumulate() - this_month_report.add(report) - _, _data, total = this_month_report() - data += [total] - data += _data - # Generate xlsx file - # workbook = dict2xlsx(data, _type="report") - # tmp_file = f"/tmp/{str(uuid.uuid4())}.xlsx" - # os.makedirs(os.path.dirname(tmp_file), exist_ok=True) - # workbook.save(tmp_file) - # c_connector.remove_local_file((tmp_file, "fake_request_id")) + subsidiary = map_subsidiary_long_to_short(subsidiary) + # Retrive data from Redis + key = f"{subsidiary}_{duration}" + data = json.loads(redis_client.get_specific_cache(settings.OVERVIEW_REPORT_ROOT, key)).get("data", []) response = { - # 'file': load_xlsx_file(), 'overview_data': data, - 'page': { - 'number': page.number, - 'total_pages': page.paginator.num_pages, - 'count': page.paginator.count, - } } return JsonResponse(response, status=200) return JsonResponse({'error': 
         return JsonResponse({'error': 'Invalid request method.'}, status=405)
+
+    @extend_schema(
+        parameters=[
+            OpenApiParameter(
+                name='duration',
+                location=OpenApiParameter.QUERY,
+                description='one of [30d, 7d]',
+                type=OpenApiTypes.STR,
+                default='30d',
+            ),
+            OpenApiParameter(
+                name='subsidiary',
+                location=OpenApiParameter.QUERY,
+                description='Subsidiary',
+                type=OpenApiTypes.STR,
+            )
+        ],
+        responses=None, tags=['Accuracy']
+    )
+    @action(detail=False, url_path="overview_download_file", methods=["GET"])
+    def overview_download_file(self, request):
+        if request.method == 'GET':
+            subsidiary = request.GET.get('subsidiary', "ALL")
+            duration = request.GET.get('duration', "")
+
+            subsidiary = map_subsidiary_long_to_short(subsidiary)
+
+            s3_key = f"{subsidiary}_{duration}.xlsx"
+
+            tmp_file = "/tmp/" + s3_key
+            os.makedirs("/tmp", exist_ok=True)
+            download_from_S3("report/" + settings.OVERVIEW_REPORT_ROOT + "/" + s3_key, tmp_file)
+            file = open(tmp_file, 'rb')
+            response = FileResponse(file, status=200)
+
+            # Set the content type and content disposition headers
+            response['Content-Type'] = 'application/octet-stream'
+            response['Content-Disposition'] = 'attachment; filename="{0}"'.format(os.path.basename(tmp_file))
+            return response
+
+        return JsonResponse({'error': 'Invalid request method.'}, status=405)
 
     @extend_schema(
         parameters=[],
diff --git a/cope2n-api/fwd_api/celery_worker/client_connector.py b/cope2n-api/fwd_api/celery_worker/client_connector.py
index a2fa2ac..5394c8e 100755
--- a/cope2n-api/fwd_api/celery_worker/client_connector.py
+++ b/cope2n-api/fwd_api/celery_worker/client_connector.py
@@ -65,6 +65,7 @@ class CeleryConnector:
         return self.send_task('upload_obj_to_s3', args)
     def remove_local_file(self, args):
         return self.send_task('remove_local_file', args, countdown=280) # nearest execution of this task in 280 seconds
+
     def process_fi(self, args):
         return self.send_task('process_fi_invoice', args)
     def process_fi_result(self, args):
diff --git a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
index 8d07a44..28a1e06 100644
--- a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
+++ b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
@@ -6,12 +6,16 @@ from ..utils import s3 as S3Util
 from ..utils.accuracy import update_temp_accuracy, IterAvg, calculate_and_save_subcription_file, count_transactions, extract_report_detail_list, calculate_a_request, ReportAccumulateByRequest
 from ..utils.file import dict2xlsx, save_workbook_file, save_report_to_S3
 from ..utils import time_stuff
+from ..utils.redis import RedisUtils
 from django.utils import timezone
 from django.db.models import Q
+import json
+import copy
 
 from celery.utils.log import get_task_logger
 from fwd import settings
 
+redis_client = RedisUtils()
 logger = get_task_logger(__name__)
@@ -107,7 +111,7 @@ def make_a_report(report_id, query_set):
             errors += request_att["err"]
             num_request += 1
 
-        transaction_att = count_transactions(start_date, end_date)
+        transaction_att = count_transactions(start_date, end_date, report.subsidiary)
         # Do saving process
         report.number_request = num_request
         report.number_images = number_images
@@ -237,8 +241,8 @@ def make_a_report_2(report_id, query_set):
             errors += request_att["err"]
             num_request += 1
 
-        report_engine.save(query_set.get("is_daily_report", False), query_set["include_test"])
-        transaction_att = count_transactions(start_date, end_date)
query_set["include_test"]) + transaction_att = count_transactions(start_date, end_date, report.subsidiary) # Do saving process report.number_request = num_request report.number_images = number_images @@ -276,6 +280,26 @@ def make_a_report_2(report_id, query_set): data_workbook = dict2xlsx(data, _type='report_detail') local_workbook = save_workbook_file(report.report_id + ".xlsx", report, data_workbook) s3_key=save_report_to_S3(report.report_id, local_workbook) + if query_set["is_daily_report"]: + # Save overview dashboard + # multiple accuracy by 100 + save_data = copy.deepcopy(_save_data) + for i, dat in enumerate(report_fine_data): + keys = [x for x in list(dat.keys()) if "accuracy" in x.lower()] + keys_percent = "images_quality" + for x_key in report_fine_data[i][keys_percent].keys(): + if "percent" not in x_key: + continue + report_fine_data[i][keys_percent][x_key] = report_fine_data[i][keys_percent][x_key]*100 + for key in keys: + if report_fine_data[i][key]: + for x_key in report_fine_data[i][key].keys(): + report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key]*100 + data_workbook = dict2xlsx(report_fine_data, _type='report') + overview_filename = query_set["subsidiary"] + "_" + query_set["report_overview_duration"] + ".xlsx" + local_workbook = save_workbook_file(overview_filename, report, data_workbook, settings.OVERVIEW_REPORT_ROOT) + s3_key=save_report_to_S3(report.report_id, local_workbook) + redis_client.set_cache(settings.OVERVIEW_REPORT_ROOT, overview_filename.replace(".xlsx", ""), json.dumps(save_data)) except IndexError as e: print(e) diff --git a/cope2n-api/fwd_api/utils/accuracy.py b/cope2n-api/fwd_api/utils/accuracy.py index 9fc0559..6d500fd 100644 --- a/cope2n-api/fwd_api/utils/accuracy.py +++ b/cope2n-api/fwd_api/utils/accuracy.py @@ -8,6 +8,7 @@ from .ocr_utils.sbt_report import post_processing_str import uuid from fwd_api.models import SubscriptionRequest, SubscriptionRequestFile, ReportFile from ..celery_worker.client_connector import c_connector +from ..utils.file import dict2xlsx, save_workbook_file, save_report_to_S3 from django.db.models import Q from django.utils import timezone import redis @@ -21,7 +22,7 @@ valid_keys = ["retailername", "sold_to_party", "purchase_date", "imei_number"] class ReportAccumulateByRequest: def __init__(self, sub): - self.redis_client = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, decode_responses=True) + # self.redis_client = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, decode_responses=True) self.sub = sub self.current_time = None self.data = {} # {"month": [total, {"day": day_data}]} @@ -112,10 +113,7 @@ class ReportAccumulateByRequest: total["total_images"] += 1 total["images_quality"]["successful"] += 1 if not report_file.is_bad_image else 0 total["images_quality"]["bad"] += 1 if report_file.is_bad_image else 0 - - print(f"[DEBUG]: report_file.reviewed_accuracy: {report_file.reviewed_accuracy}") - print(f"[DEBUG]: report_file.feedback_accuracy: {report_file.feedback_accuracy}") - + # total["report_files"].append(report_file) if sum([len(report_file.reviewed_accuracy[x]) for x in report_file.reviewed_accuracy.keys() if "_count" not in x]) > 0 : total["average_accuracy_rate"]["imei"].add(report_file.reviewed_accuracy.get("imei_number", [])) @@ -150,6 +148,7 @@ class ReportAccumulateByRequest: day_data["images_quality"]["bad"] += 1 if report_file.is_bad_image else 0 day_data["num_imei"] += 1 if report_file.doc_type == "imei" else 0 day_data["num_invoice"] += 1 if report_file.doc_type 
== "invoice" else 0 + day_data["report_files"].append(report_file) if sum([len(report_file.reviewed_accuracy[x]) for x in report_file.reviewed_accuracy.keys() if "_count" not in x]) > 0 : day_data["average_accuracy_rate"]["imei"].add(report_file.reviewed_accuracy.get("imei_number", 0)) @@ -192,14 +191,13 @@ class ReportAccumulateByRequest: for report_file in report_files: self.data[this_month][0] = self.update_total(self.data[this_month][0], report_file) # Update the subtotal within the month self.data[this_month][1][this_day] = self.update_day(self.data[this_month][1][this_day], report_file) # Update the subtotal of the day - # save repot detail def count_transactions_within_day(self, date_string): # convert this day into timezone.datetime at UTC start_date = datetime.strptime(date_string, "%Y%m%d") - start_date_utc = timezone.make_aware(start_date, timezone=timezone.utc) - end_date_utc = start_date_utc + timezone.timedelta(days=1) - return count_transactions(start_date_utc, end_date_utc) + start_date_with_timezone = timezone.make_aware(start_date) + end_date_with_timezone = start_date_with_timezone + timezone.timedelta(days=1) + return count_transactions(start_date_with_timezone, end_date_with_timezone, self.sub) def save(self, root_report_id, is_daily_report=False, include_test=False): report_data = self.get() @@ -214,8 +212,8 @@ class ReportAccumulateByRequest: # save daily reports report_id = root_report_id + "_" + day start_date = datetime.strptime(day, "%Y%m%d") - start_date_utc = timezone.make_aware(start_date, timezone=timezone.utc) - end_date_utc = start_date_utc + timezone.timedelta(days=1) + start_date_with_timezone = timezone.make_aware(start_date) + end_date_with_timezone = start_date_with_timezone + timezone.timedelta(days=1) _average_OCR_time = {"invoice": self.data[month][1][day]["average_processing_time"]["invoice"](), "imei": self.data[month][1][day]["average_processing_time"]["imei"](), "invoice_count": self.data[month][1][day]["average_processing_time"]["invoice"].count, "imei_count": self.data[month][1][day]["average_processing_time"]["imei"].count} @@ -235,8 +233,8 @@ class ReportAccumulateByRequest: is_daily_report=is_daily_report, subsidiary=self.sub.lower().replace(" ", ""), include_test=include_test, - start_at=start_date_utc, - end_at=end_date_utc, + start_at=start_date_with_timezone, + end_at=end_date_with_timezone, status="Ready", number_request=report_data[month][1][day]["num_request"], number_images=report_data[month][1][day]["total_images"], @@ -250,14 +248,18 @@ class ReportAccumulateByRequest: reviewed_accuracy=acumulated_acc["reviewed_accuracy"], ) new_report.save() - # save data to redis for overview retrieval - self.redis_client.set(settings.OVERVIEW_REPORT_KEY, json.dumps(save_data)) - print(f'[DEBUG]: fine_data: {fine_data}') - + data = extract_report_detail_list(self.data[month][1][day]["report_files"], lower=True) + data_workbook = dict2xlsx(data, _type='report_detail') + local_workbook = save_workbook_file(report_id + ".xlsx", new_report, data_workbook) + s3_key=save_report_to_S3(report_id, local_workbook) + return fine_data, save_data + def get(self) -> Any: # FIXME: This looks like a junk _data = copy.deepcopy(self.data) for month in _data.keys(): + _data[month][0]["images_quality"]["successful_percent"] = _data[month][0]["images_quality"]["successful"]/_data[month][0]["total_images"] if _data[month][0]["total_images"] > 0 else 0 + _data[month][0]["images_quality"]["bad_percent"] = 
_data[month][0]["images_quality"]["bad"]/_data[month][0]["total_images"] if _data[month][0]["total_images"] > 0 else 0 num_transaction_imei = 0 num_transaction_invoice = 0 for day in _data[month][1].keys(): @@ -278,7 +280,11 @@ class ReportAccumulateByRequest: _data[month][1][day]["reviewed_accuracy"]["purchase_date"] = _data[month][1][day]["reviewed_accuracy"]["purchase_date"]() _data[month][1][day]["reviewed_accuracy"]["retailername"] = _data[month][1][day]["reviewed_accuracy"]["retailername"]() _data[month][1][day]["reviewed_accuracy"]["sold_to_party"] = _data[month][1][day]["reviewed_accuracy"]["sold_to_party"]() - + _data[month][1][day].pop("report_files") + + _data[month][1][day]["images_quality"]["successful_percent"] = _data[month][1][day]["images_quality"]["successful"]/_data[month][1][day]["total_images"] if _data[month][1][day]["total_images"] > 0 else 0 + _data[month][1][day]["images_quality"]["bad_percent"] = _data[month][1][day]["images_quality"]["bad"]/_data[month][1][day]["total_images"] if _data[month][1][day]["total_images"] > 0 else 0 + _data[month][0]["usage"]["imei"] = num_transaction_imei _data[month][0]["usage"]["invoice"] = num_transaction_invoice _data[month][0]["average_accuracy_rate"]["imei"] = _data[month][0]["average_accuracy_rate"]["imei"]() @@ -535,9 +541,11 @@ def extract_report_detail_list(report_detail_list, lower=False, in_percent=True) data[i][key] = data[i][key]*100 return data -def count_transactions(start_date, end_date): +def count_transactions(start_date, end_date, subsidiary="all"): base_query = Q(created_at__range=(start_date, end_date)) base_query &= Q(is_test_request=False) + if subsidiary and subsidiary.lower().replace(" ", "")!="all": + base_query &= Q(redemption_id__startswith=subsidiary) transaction_att = {} print(f"[DEBUG]: atracting transactions attribute...") diff --git a/cope2n-api/fwd_api/utils/file.py b/cope2n-api/fwd_api/utils/file.py index 8d3b4a8..f7434d9 100644 --- a/cope2n-api/fwd_api/utils/file.py +++ b/cope2n-api/fwd_api/utils/file.py @@ -201,10 +201,13 @@ def save_feedback_file(file_name: str, rq: FeedbackRequest, uploaded_file: dict) csvfile.write(file_contents) return file_path -def save_workbook_file(file_name: str, rp: Report, workbook): +def save_workbook_file(file_name: str, rp: Report, workbook, prefix=""): report_id = str(rp.report_id) - folder_path = os.path.join(settings.MEDIA_ROOT, "report", report_id) + if not prefix: + folder_path = os.path.join(settings.MEDIA_ROOT, "report", report_id) + else: + folder_path = os.path.join(settings.MEDIA_ROOT, "report", prefix) os.makedirs(folder_path, exist_ok = True) file_path = os.path.join(folder_path, file_name) @@ -399,12 +402,17 @@ def build_media_url_v2(media_id: str, user_id: int, sub_id: int, u_sync_id: str) def get_value(_dict, keys): keys = keys.split('.') value = _dict - for key in keys: - if not key in value.keys(): - return "-" - else: - value = value.get(key, {}) - + try: + for key in keys: + if not key in value.keys(): + return "-" + else: + value = value.get(key, {}) + except Exception as e: + print(f"[ERROR]: {e}") + print(f"[ERROR]: value: {value}") + print(f"[ERROR]: keys: {keys}") + if not value: return "-" elif isinstance(value, list): @@ -486,13 +494,23 @@ def dict2xlsx(input: json, _type='report'): ws[key + str(start_index)].border = border if _type == 'report': - ws[key + str(start_index)].font = font_black_bold - if key_index == 0 or (key_index >= 9 and key_index <= 15): - ws[key + str(start_index)].fill = fill_gray - elif key_index == 1: - ws[key + 
-                    ws[key + str(start_index)].fill = fill_green
-                elif key_index >= 4 and key_index <= 8:
-                    ws[key + str(start_index)].fill = fill_yellow
+                if subtotal['subs'] == '+':
+                    ws[key + str(start_index)].font = font_black_bold
+                    if key_index == 0 or (key_index >= 9 and key_index <= 15):
+                        ws[key + str(start_index)].fill = fill_gray
+                    elif key_index == 1:
+                        ws[key + str(start_index)].fill = fill_green
+                    elif key_index >= 4 and key_index <= 8:
+                        ws[key + str(start_index)].fill = fill_yellow
+                else:
+                    if 'average_accuracy_rate' in mapping[key] and type(value) in [int, float] and value < 95:
+                        ws[key + str(start_index)].style = normal_cell_red
+                    elif 'average_processing_time' in mapping[key] and type(value) in [int, float] and value > 2.0:
+                        ws[key + str(start_index)].style = normal_cell_red
+                    elif 'bad_percent' in mapping[key] and type(value) in [int, float] and value > 10:
+                        ws[key + str(start_index)].style = normal_cell_red
+                    else :
+                        ws[key + str(start_index)].style = normal_cell
             elif _type == 'report_detail':
                 if 'accuracy' in mapping[key] and type(value) in [int, float] and value < 75:
                     ws[key + str(start_index)].style = normal_cell_red
@@ -502,21 +520,5 @@ def dict2xlsx(input: json, _type='report'):
                     ws[key + str(start_index)].style = normal_cell
 
             start_index += 1
-
-        if 'data' in subtotal.keys():
-            for record in subtotal['data']:
-                for key in mapping.keys():
-                    value = get_value(record, mapping[key])
-                    ws[key + str(start_index)] = value
-                    if 'average_accuracy_rate' in mapping[key] and type(value) in [int, float] and value < 95:
-                        ws[key + str(start_index)].style = normal_cell_red
-                    elif 'average_processing_time' in mapping[key] and type(value) in [int, float] and value > 2.0:
-                        ws[key + str(start_index)].style = normal_cell_red
-                    elif 'bad_percent' in mapping[key] and type(value) in [int, float] and value > 10:
-                        ws[key + str(start_index)].style = normal_cell_red
-                    else :
-                        ws[key + str(start_index)].style = normal_cell
-
-                    start_index += 1
 
     return wb
diff --git a/cope2n-api/fwd_api/utils/redis.py b/cope2n-api/fwd_api/utils/redis.py
index fc24004..d8d74e1 100644
--- a/cope2n-api/fwd_api/utils/redis.py
+++ b/cope2n-api/fwd_api/utils/redis.py
@@ -13,8 +13,8 @@ class RedisUtils:
             request_id: str
            data: dict
            image_index: int
-        """request_id
-        self.redis_client.hset(, image_index, json.dumps(data))
+        """
+        self.redis_client.hset(request_id, image_index, json.dumps(data))
         self.redis_client.expire(request_id, 3600)
 
     def get_all_cache(self, request_id):
@@ -22,6 +22,9 @@ class RedisUtils:
         for key, value in self.redis_client.hgetall(request_id).items():
             resutlt[key] = json.loads(value)
         return resutlt
+
+    def get_specific_cache(self, request_id, key):
+        return json.loads(self.redis_client.hget(request_id, key))
 
     def get_size(self, request_id):
         return self.redis_client.hlen(request_id)
diff --git a/cope2n-api/fwd_api/utils/subsidiary.py b/cope2n-api/fwd_api/utils/subsidiary.py
new file mode 100644
index 0000000..d10c879
--- /dev/null
+++ b/cope2n-api/fwd_api/utils/subsidiary.py
@@ -0,0 +1,11 @@
+from fwd.settings import SUBS
+
+def map_subsidiary_long_to_short(long_sub):
+    short_sub = SUBS.get(long_sub.upper(), "all")
+    return short_sub.upper()
+
+def map_subsidiary_short_to_long(short_sub):
+    for k, v in SUBS.items():
+        if v == short_sub.upper():
+            return k
+    return "ALL"
\ No newline at end of file
diff --git a/cope2n-api/scripts/script.py b/cope2n-api/scripts/script.py
new file mode 100644
index 0000000..713c925
--- /dev/null
+++ b/cope2n-api/scripts/script.py
@@ -0,0 +1,68 @@
+import os
+import time
+import requests
+from datetime import datetime
+
+# Get the proxy URL from the environment variable
+interval = 60*60*1 # 1 hour
+update_cost = 60*3
+proxy_url = os.getenv('PROXY', "localhost")
+
+# Define the login API URL
+login_url = f'{proxy_url}/api/ctel/login/'
+login_token = None
+
+# Define the login credentials
+login_credentials = {
+    'username': 'sbt',
+    'password': '7Eg4AbWIXDnufgn'
+}
+
+# Define the command to call the update API
+update_url = f'{proxy_url}/api/ctel/make_report/'
+update_params = {
+    'is_daily_report': 'true',
+    'report_overview_duration': '',
+    'subsidiary': None
+}
+
+"report_overview_duration"
+
+def update_report(login_token, report_overview_duration=["30d", "7d"], subsidiary=["all", "SEAU", "SESP", "SME", "SEPCO", "TSE", "SEIN"]):
+    headers = {'Authorization': login_token}
+    for dur in report_overview_duration:
+        for sub in subsidiary:
+            update_params["report_overview_duration"] = dur
+            update_params["subsidiary"] = sub
+            update_response = requests.get(update_url, params=update_params, headers=headers)
+            print("[INFO]: update_response at {} by {} - {} with status {}".format(datetime.now(), dur, sub, update_response.status_code))
+            update_response.raise_for_status()
+            time.sleep(update_cost)
+
+# Define the interval in seconds between API calls
+# time.sleep(60)
+
+while True:
+    # Call the login API and retrieve the login token
+    if not login_token:
+        login_response = requests.post(login_url, data=login_credentials)
+        # login_response.raise_for_status()
+        if login_response.status_code == 200:
+            login_token = login_response.json()['token']
+            print("[INFO] relogged in at {}".format(datetime.now()))
+
+    # Call the update API
+    try:
+        update_report(login_token)
+    except Exception as e:
+        print(f"[ERROR]: {e}")
+        print(f"[ERROR]: Failed to update_response, retrying...")
+        login_response = requests.post(login_url, data=login_credentials)
+        # login_response.raise_for_status()
+        if login_response.status_code == 200:
+            login_token = login_response.json()['token']
+            print("[INFO] relogged in at {}".format(datetime.now()))
+            update_report(login_token)
+
+    # Wait for the specified interval
+    time.sleep(interval)
\ No newline at end of file
diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml
index 2ff79e2..1d9e946 100755
--- a/docker-compose-dev.yml
+++ b/docker-compose-dev.yml
@@ -175,6 +175,7 @@ services:
     working_dir: /app
     command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 5"
+    # command: bash -c "tail -f > /dev/null"
 
 
 # Back-end persistent
   db-sbt:
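Reviewer note (not part of the patch): a minimal sketch of the overview-cache round trip this change introduces, using only the RedisUtils helpers and settings shown above. The key "SEAU_30d" is a hypothetical example of the "<subsidiary>_<duration>" naming that make_a_report_2 and the overview endpoint use.

```python
# Illustrative sketch only, assuming a configured Django environment for this project.
import json

from fwd import settings
from fwd_api.utils.redis import RedisUtils

redis_client = RedisUtils()

# Writer side (make_a_report_2): one Redis hash under OVERVIEW_REPORT_ROOT,
# one field per "<subsidiary>_<duration>" combination, e.g. "SEAU_30d".
save_data = {"data": []}  # payload returned by ReportAccumulateByRequest.save()
redis_client.set_cache(settings.OVERVIEW_REPORT_ROOT, "SEAU_30d", json.dumps(save_data))

# Reader side (AccuracyViewSet.overview): set_cache json.dumps()-encodes the value
# again internally, so it is decoded twice on the way back out.
cached = json.loads(redis_client.get_specific_cache(settings.OVERVIEW_REPORT_ROOT, "SEAU_30d"))
overview_rows = cached.get("data", [])
```

The matching workbook for the same "<subsidiary>_<duration>" pair is downloaded from S3 under "report/overview/<subsidiary>_<duration>.xlsx" by the new overview_download_file endpoint.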