diff --git a/cope2n-api/fwd_api/api/accuracy_view.py b/cope2n-api/fwd_api/api/accuracy_view.py index 8f2c408..54f0a3c 100755 --- a/cope2n-api/fwd_api/api/accuracy_view.py +++ b/cope2n-api/fwd_api/api/accuracy_view.py @@ -29,80 +29,81 @@ import copy redis_client = RedisUtils() + class AccuracyViewSet(viewsets.ViewSet): lookup_field = "username" @extend_schema( - parameters=[ - OpenApiParameter( - name='start_date', - location=OpenApiParameter.QUERY, - description='Start date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2023-01-02T00:00:00+0700', - ), - OpenApiParameter( - name='end_date', - location=OpenApiParameter.QUERY, - description='End date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2024-01-10T00:00:00+0700', - ), - OpenApiParameter( - name='includes_test', - location=OpenApiParameter.QUERY, - description='Whether to include test record or not', - type=OpenApiTypes.BOOL, - ), - OpenApiParameter( - name='is_reviewed', - location=OpenApiParameter.QUERY, - description='Which records to be query', - type=OpenApiTypes.STR, - enum=['reviewed', 'not_reviewed', 'all'], - ), - OpenApiParameter( - name='subsidiary', - location=OpenApiParameter.QUERY, - description='Which subsidiary to be included', - type=OpenApiTypes.STR, - enum=list(settings.SUBS.keys()), - ), - OpenApiParameter( - name='request_id', - location=OpenApiParameter.QUERY, - description='Specific request id', - type=OpenApiTypes.STR, - ), - OpenApiParameter( - name='with_redemption_id', - location=OpenApiParameter.QUERY, - description='Specific redemption id', - type=OpenApiTypes.BOOL, - ), - OpenApiParameter( - name='page', - location=OpenApiParameter.QUERY, - description='Page number', - type=OpenApiTypes.INT, - required=False - ), - OpenApiParameter( - name='page_size', - location=OpenApiParameter.QUERY, - description='Number of items per page', - type=OpenApiTypes.INT, - required=False - ), - OpenApiParameter( - name='max_accuracy', - location=OpenApiParameter.QUERY, - description='Return requests with acccuracy smaller than this number', - type=OpenApiTypes.FLOAT, - required=False - ), - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='start_date', + location=OpenApiParameter.QUERY, + description='Start date (YYYY-mm-DDTHH:MM:SSZ)', + type=OpenApiTypes.DATE, + default='2023-01-02T00:00:00+0700', + ), + OpenApiParameter( + name='end_date', + location=OpenApiParameter.QUERY, + description='End date (YYYY-mm-DDTHH:MM:SSZ)', + type=OpenApiTypes.DATE, + default='2024-01-10T00:00:00+0700', + ), + OpenApiParameter( + name='includes_test', + location=OpenApiParameter.QUERY, + description='Whether to include test record or not', + type=OpenApiTypes.BOOL, + ), + OpenApiParameter( + name='is_reviewed', + location=OpenApiParameter.QUERY, + description='Which records to be query', + type=OpenApiTypes.STR, + enum=['reviewed', 'not_reviewed', 'all'], + ), + OpenApiParameter( + name='subsidiary', + location=OpenApiParameter.QUERY, + description='Which subsidiary to be included', + type=OpenApiTypes.STR, + enum=list(settings.SUBS.keys()), + ), + OpenApiParameter( + name='request_id', + location=OpenApiParameter.QUERY, + description='Specific request id', + type=OpenApiTypes.STR, + ), + OpenApiParameter( + name='with_redemption_id', + location=OpenApiParameter.QUERY, + description='Specific redemption id', + type=OpenApiTypes.BOOL, + ), + OpenApiParameter( + name='page', + location=OpenApiParameter.QUERY, + description='Page number', + type=OpenApiTypes.INT, + required=False 
+ ), + OpenApiParameter( + name='page_size', + location=OpenApiParameter.QUERY, + description='Number of items per page', + type=OpenApiTypes.INT, + required=False + ), + OpenApiParameter( + name='max_accuracy', + location=OpenApiParameter.QUERY, + description='Return requests with accuracy smaller than this number', + type=OpenApiTypes.FLOAT, + required=False + ), + ], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="request_list", methods=["GET"]) def get_request_list(self, request): @@ -117,13 +118,16 @@ class AccuracyViewSet(viewsets.ViewSet): include_test = request.GET.get('includes_test', False) subsidiary = request.GET.get("subsidiary", "all") max_accuracy = float(request.GET.get("max_accuracy", 100)) - subsidiary = map_subsidiary_long_to_short(subsidiary) + # subsidiary = map_subsidiary_long_to_short(subsidiary) base_query = Q(status=200) if start_date_str or end_date_str: try: - start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%d') # We care only about day precision only - end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%d') + # We only care about day precision + start_date = timezone.datetime.strptime( + start_date_str, '%Y-%m-%d') + end_date = timezone.datetime.strptime( + end_date_str, '%Y-%m-%d') end_date = end_date + timezone.timedelta(days=1) # Round: # end_date_str to the beginning of the next day @@ -131,16 +135,17 @@ class AccuracyViewSet(viewsets.ViewSet): start_date = timezone.make_aware(start_date) end_date = timezone.make_aware(end_date) - start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') # inside logic will include second precision with timezone for calculation + # inside logic will include second precision with timezone for calculation + start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') end_date_str = end_date.strftime('%Y-%m-%dT%H:%M:%S%z') base_query &= Q(created_at__range=(start_date, end_date)) except Exception as e: raise InvalidException(excArgs="Date format") - - if request_id: + + if request_id: base_query &= Q(request_id=request_id) if isinstance(with_redemption_id, str): - with_redemption_id = True if with_redemption_id=="true" else False + with_redemption_id = True if with_redemption_id == "true" else False if with_redemption_id: base_query &= Q(redemption_id__isnull=False) else: @@ -151,7 +156,7 @@ class AccuracyViewSet(viewsets.ViewSet): else: base_query &= Q(redemption_id__isnull=True) if isinstance(include_test, str): - include_test = True if include_test=="true" else False + include_test = True if include_test == "true" else False if not include_test: base_query &= Q(is_test_request=False) elif isinstance(include_test, bool): @@ -168,19 +173,22 @@ class AccuracyViewSet(viewsets.ViewSet): if subsidiary.lower() != "seao": if subsidiary not in list(settings.SUBS.keys()): raise InvalidException(excArgs="subsidiary") - if subsidiary and subsidiary.lower().replace(" ", "")!="all": - base_query &= Q(redemption_id__startswith=map_subsidiary_long_to_short(subsidiary)) + if subsidiary and subsidiary.lower().replace(" ", "") != "all": + base_query &= Q( + redemption_id__startswith=map_subsidiary_long_to_short(subsidiary)) if isinstance(max_accuracy, float): - base_query &= Q(raw_accuracy__lt=(max_accuracy/100)) | Q(raw_accuracy__isnull=True) + base_query &= Q(raw_accuracy__lt=( + max_accuracy/100)) | Q(raw_accuracy__isnull=True) - subscription_requests = SubscriptionRequest.objects.filter(base_query).order_by('created_at') + subscription_requests = SubscriptionRequest.objects.filter( +
base_query).order_by('created_at') request_count = subscription_requests.count() paginator = Paginator(subscription_requests, page_size) page = paginator.get_page(page_number) - + data = [] for rq in page: imeis = [] @@ -189,17 +197,22 @@ class AccuracyViewSet(viewsets.ViewSet): try: if rq.reviewed_result is not None: imeis = rq.reviewed_result.get("imei_number", []) - purchase_date = rq.reviewed_result.get("purchase_date", []) + purchase_date = rq.reviewed_result.get( + "purchase_date", []) retailer = rq.reviewed_result.get("retailername", "") - elif rq.feedback_result is not None : + elif rq.feedback_result is not None: imeis = rq.feedback_result.get("imei_number", []) - purchase_date = rq.feedback_result.get("purchase_date", []) + purchase_date = rq.feedback_result.get( + "purchase_date", []) retailer = rq.feedback_result.get("retailername", "") elif rq.predict_result is not None: if rq.predict_result.get("status", 404) == 200: - imeis = rq.predict_result.get("content", {}).get("document", [])[0].get("content", [])[3].get("value", []) - purchase_date = rq.predict_result.get("content", {}).get("document", [])[0].get("content", [])[2].get("value", []) - retailer = rq.predict_result.get("content", {}).get("document", [])[0].get("content", [])[0].get("value", []) + imeis = rq.predict_result.get("content", {}).get("document", [])[ + 0].get("content", [])[3].get("value", []) + purchase_date = rq.predict_result.get("content", {}).get( + "document", [])[0].get("content", [])[2].get("value", []) + retailer = rq.predict_result.get("content", {}).get("document", [])[ + 0].get("content", [])[0].get("value", []) except Exception as e: print(f"[ERROR]: {e}") print(f"[ERROR]: {rq}") @@ -230,10 +243,10 @@ class AccuracyViewSet(viewsets.ViewSet): return JsonResponse(response) return JsonResponse({'error': 'Invalid request method.'}, status=405) - + @extend_schema( - request=ReportCreationSerializer(), - responses=None, tags=['Accuracy'] + request=ReportCreationSerializer(), + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="make_report", methods=["POST"]) def make_report(self, request): @@ -246,11 +259,12 @@ class AccuracyViewSet(viewsets.ViewSet): include_test = request.data.get('include_test', False) subsidiary = request.data.get("subsidiary", "all") is_daily_report = request.data.get('is_daily_report', False) - report_overview_duration = request.data.get("report_overview_duration", "") + report_overview_duration = request.data.get( + "report_overview_duration", "") report_type = request.data.get("report_type", "accuracy") subsidiary = map_subsidiary_long_to_short(subsidiary) - - if report_type=="billing" and subsidiary.lower().replace(" ", "") not in settings.SUB_FOR_BILLING: + + if report_type == "billing" and subsidiary.lower().replace(" ", "") not in settings.SUB_FOR_BILLING: raise InvalidException(excArgs="Subsidiary for billing report") if is_daily_report: @@ -261,37 +275,45 @@ class AccuracyViewSet(viewsets.ViewSet): start_date = end_date - timezone.timedelta(days=30) else: start_date = end_date - timezone.timedelta(days=7) - start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) - start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') # inside logic will include second precision with timezone for calculation + start_date = start_date.replace( + hour=0, minute=0, second=0, microsecond=0) + # inside logic will include second precision with timezone for calculation + start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') end_date_str = 
end_date.strftime('%Y-%m-%dT%H:%M:%S%z') else: try: - start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%d') # We care only about day precision only - end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%d') + # We care only about day precision only + start_date = timezone.datetime.strptime( + start_date_str, '%Y-%m-%d') + end_date = timezone.datetime.strptime( + end_date_str, '%Y-%m-%d') # Round: # end_date_str to the beginning of the next day # start_date_str to the start of the date start_date = timezone.make_aware(start_date) end_date = timezone.make_aware(end_date) - start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') # inside logic will include second precision with timezone for calculation - end_date_str = (end_date + timezone.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z') + # inside logic will include second precision with timezone for calculation + start_date_str = start_date.strftime('%Y-%m-%dT%H:%M:%S%z') + end_date_str = ( + end_date + timezone.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z') except ValueError: raise InvalidException(excArgs="Date format") query_set = {"start_date_str": start_date_str, - "end_date_str": end_date_str, - "request_id": request_id, - "redemption_id": redemption_id, - "is_reviewed": is_reviewed, - "include_test": include_test, - "subsidiary": subsidiary, - "is_daily_report": is_daily_report, - "report_overview_duration": report_overview_duration, - "report_type": report_type, - } + "end_date_str": end_date_str, + "request_id": request_id, + "redemption_id": redemption_id, + "is_reviewed": is_reviewed, + "include_test": include_test, + "subsidiary": subsidiary, + "is_daily_report": is_daily_report, + "report_overview_duration": report_overview_duration, + "report_type": report_type, + } - report_id = "report" + "_" + timezone.datetime.now().strftime("%Y%m%d%H%M%S%z") + "_" + uuid.uuid4().hex + report_id = "report" + "_" + timezone.datetime.now().strftime("%Y%m%d%H%M%S%z") + \ + "_" + uuid.uuid4().hex new_report: Report = Report( report_id=report_id, is_daily_report=is_daily_report, @@ -311,29 +333,29 @@ class AccuracyViewSet(viewsets.ViewSet): # Redundant, will be removed by 19 March 2024 @extend_schema( - parameters=[ - OpenApiParameter( - name='report_id', - location=OpenApiParameter.QUERY, - description='Specific report id', - type=OpenApiTypes.STR, - ), - OpenApiParameter( - name='page', - location=OpenApiParameter.QUERY, - description='Page number', - type=OpenApiTypes.INT, - required=False - ), - OpenApiParameter( - name='page_size', - location=OpenApiParameter.QUERY, - description='Number of items per page', - type=OpenApiTypes.INT, - required=False - ), - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='report_id', + location=OpenApiParameter.QUERY, + description='Specific report id', + type=OpenApiTypes.STR, + ), + OpenApiParameter( + name='page', + location=OpenApiParameter.QUERY, + description='Page number', + type=OpenApiTypes.INT, + required=False + ), + OpenApiParameter( + name='page_size', + location=OpenApiParameter.QUERY, + description='Number of items per page', + type=OpenApiTypes.INT, + required=False + ), + ], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="report_detail_list", methods=["GET"]) def get_report_detail_list(self, request): @@ -349,7 +371,7 @@ class AccuracyViewSet(viewsets.ViewSet): page = paginator.get_page(page_number) data = extract_report_detail_list(page, in_percent=False) - + response = { 'report_detail': data, 'metadata': 
{"subsidiary": map_subsidiary_short_to_long(report.subsidiary), @@ -366,48 +388,49 @@ class AccuracyViewSet(viewsets.ViewSet): return JsonResponse({'error': 'Invalid request method.'}, status=405) @extend_schema( - parameters=[ - OpenApiParameter( - name='start_date', - location=OpenApiParameter.QUERY, - description='Start date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2024-01-02T00:00:00+0700', - ), - OpenApiParameter( - name='end_date', - location=OpenApiParameter.QUERY, - description='End date (YYYY-mm-DDTHH:MM:SSZ)', - type=OpenApiTypes.DATE, - default='2024-01-10T00:00:00+0700', - ), - OpenApiParameter( - name='daily_report_only', - location=OpenApiParameter.QUERY, - description='Specific report id', - type=OpenApiTypes.BOOL, - ), - OpenApiParameter( - name='page', - location=OpenApiParameter.QUERY, - description='Page number', - type=OpenApiTypes.INT, - required=False - ), - OpenApiParameter( - name='page_size', - location=OpenApiParameter.QUERY, - description='Number of items per page', - type=OpenApiTypes.INT, - required=False - ), - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='start_date', + location=OpenApiParameter.QUERY, + description='Start date (YYYY-mm-DDTHH:MM:SSZ)', + type=OpenApiTypes.DATE, + default='2024-01-02T00:00:00+0700', + ), + OpenApiParameter( + name='end_date', + location=OpenApiParameter.QUERY, + description='End date (YYYY-mm-DDTHH:MM:SSZ)', + type=OpenApiTypes.DATE, + default='2024-01-10T00:00:00+0700', + ), + OpenApiParameter( + name='daily_report_only', + location=OpenApiParameter.QUERY, + description='Specific report id', + type=OpenApiTypes.BOOL, + ), + OpenApiParameter( + name='page', + location=OpenApiParameter.QUERY, + description='Page number', + type=OpenApiTypes.INT, + required=False + ), + OpenApiParameter( + name='page_size', + location=OpenApiParameter.QUERY, + description='Number of items per page', + type=OpenApiTypes.INT, + required=False + ), + ], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="report_list", methods=["GET"]) def get_report_list(self, request): if request.method == 'GET': - exclude_daily_report = request.GET.get('exclude_daily_report', True) + exclude_daily_report = request.GET.get( + 'exclude_daily_report', True) start_date_str = request.GET.get('start_date', "") end_date_str = request.GET.get('end_date', "") page_number = int(request.GET.get('page', 1)) @@ -418,32 +441,39 @@ class AccuracyViewSet(viewsets.ViewSet): reports = Report.objects else: try: - start_date = timezone.datetime.strptime(start_date_str, '%Y-%m-%dT%H:%M:%S%z') - end_date = timezone.datetime.strptime(end_date_str, '%Y-%m-%dT%H:%M:%S%z') + start_date = timezone.datetime.strptime( + start_date_str, '%Y-%m-%dT%H:%M:%S%z') + end_date = timezone.datetime.strptime( + end_date_str, '%Y-%m-%dT%H:%M:%S%z') except ValueError: raise InvalidException(excArgs="Date format") base_query = Q(created_at__range=(start_date, end_date)) reports = Report.objects.filter(base_query) - if exclude_daily_report: + if exclude_daily_report: reports = Report.objects.filter(is_daily_report=False) reports = reports.order_by('created_at').reverse() - + paginator = Paginator(reports, page_size) page = paginator.get_page(page_number) data = [] for report in page: - acc_keys = ["purchase_date", "retailername", "invoice_no", "imei_number", "avg"] + acc_keys = ["purchase_date", "retailername", + "invoice_no", "imei_number", "avg"] acc = {} for key in acc_keys: - fb = report.feedback_accuracy.get(key, 0) if 
report.feedback_accuracy else 0 - rv = report.reviewed_accuracy.get(key, 0) if report.reviewed_accuracy else 0 + fb = report.feedback_accuracy.get( + key, 0) if report.feedback_accuracy else 0 + rv = report.reviewed_accuracy.get( + key, 0) if report.reviewed_accuracy else 0 if report.report_type not in ["BILLING", "billing"]: - acc[key] = report.combined_accuracy.get(key, 0) if report.combined_accuracy else max([fb, rv]) + acc[key] = report.combined_accuracy.get( + key, 0) if report.combined_accuracy else max([fb, rv]) else: acc[key] = None - processing_time = report.average_OCR_time.get("avg", None) if report.average_OCR_time else None + processing_time = report.average_OCR_time.get( + "avg", None) if report.average_OCR_time else None if processing_time and processing_time == 0: processing_time = None data.append({ @@ -478,22 +508,22 @@ class AccuracyViewSet(viewsets.ViewSet): return JsonResponse({'error': 'Invalid request method.'}, status=405) @extend_schema( - parameters=[ - OpenApiParameter( - name='duration', - location=OpenApiParameter.QUERY, - description='one of [30d, 7d]', - type=OpenApiTypes.STR, - default='30d', - ), - OpenApiParameter( - name='subsidiary', - location=OpenApiParameter.QUERY, - description='Subsidiary', - type=OpenApiTypes.STR, - ) - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='duration', + location=OpenApiParameter.QUERY, + description='one of [30d, 7d]', + type=OpenApiTypes.STR, + default='30d', + ), + OpenApiParameter( + name='subsidiary', + location=OpenApiParameter.QUERY, + description='Subsidiary', + type=OpenApiTypes.STR, + ) + ], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="overview_sumary", methods=["GET"]) def overview_sumary(self, request): @@ -504,7 +534,7 @@ class AccuracyViewSet(viewsets.ViewSet): subsidiary = map_subsidiary_long_to_short(_subsidiary) # Retrive data from Redis - key = f"{subsidiary}_{duration}" + key = f"{subsidiary}_{duration}" data = get_cache(key).get("data", []) response = { 'overview_data': data, @@ -514,22 +544,22 @@ class AccuracyViewSet(viewsets.ViewSet): return JsonResponse({'error': 'Invalid request method.'}, status=405) @extend_schema( - parameters=[ - OpenApiParameter( - name='duration', - location=OpenApiParameter.QUERY, - description='one of [30d, 7d]', - type=OpenApiTypes.STR, - default='30d', - ), - OpenApiParameter( - name='subsidiary', - location=OpenApiParameter.QUERY, - description='Subsidiary', - type=OpenApiTypes.STR, - ) - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='duration', + location=OpenApiParameter.QUERY, + description='one of [30d, 7d]', + type=OpenApiTypes.STR, + default='30d', + ), + OpenApiParameter( + name='subsidiary', + location=OpenApiParameter.QUERY, + description='Subsidiary', + type=OpenApiTypes.STR, + ) + ], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path="overview", methods=["GET"]) def overview(self, request): @@ -538,7 +568,7 @@ class AccuracyViewSet(viewsets.ViewSet): duration = request.GET.get('duration', "") subsidiary = map_subsidiary_long_to_short(_subsidiary) - + if _subsidiary == "ALL": # aggregate_overview from subsibdiaries subsidiaries_to_include = list(settings.SUBS.values()) @@ -546,27 +576,32 @@ class AccuracyViewSet(viewsets.ViewSet): # subsidiaries_to_include.remove("seao") subsidiary_overview_reports = [] for sub in subsidiaries_to_include: - key = f"{sub}_{duration}" + key = f"{sub}_{duration}" try: this_overview = get_cache(key).get("data", []) if sub 
!= "seao": - this_overview = [d for d in this_overview if d.get("subs") != "+"] + this_overview = [ + d for d in this_overview if d.get("subs") != "+"] else: for item in this_overview: if item.get("subs") == "+": - item["extraction_date"] = item["extraction_date"].replace("Subtotal ", "").replace("(", "").replace(")", "") + "-32" + item["extraction_date"] = item["extraction_date"].replace( + "Subtotal ", "").replace("(", "").replace(")", "") + "-32" subsidiary_overview_reports += this_overview except Exception as e: - print(f"[WARM]: Unable to retrive data {key} from Redis, skipping...") + print( + f"[WARM]: Unable to retrive data {key} from Redis, skipping...") data = aggregate_overview(subsidiary_overview_reports) for item in data: if item.get("subs") == "+": - item["extraction_date"] = "Subtotal (" + item["extraction_date"].replace("-32", "") + ")" + item["extraction_date"] = "Subtotal (" + item["extraction_date"].replace( + "-32", "") + ")" # Do the saving process report_fine_data = copy.deepcopy(data) for i, dat in enumerate(report_fine_data): - keys = [x for x in list(dat.keys()) if "accuracy" in x.lower()] + keys = [x for x in list(dat.keys()) + if "accuracy" in x.lower()] keys_percent = "images_quality" for x_key in report_fine_data[i][keys_percent].keys(): if "percent" not in x_key: @@ -575,44 +610,46 @@ class AccuracyViewSet(viewsets.ViewSet): for key in keys: if report_fine_data[i][key]: for x_key in report_fine_data[i][key].keys(): - report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key]*100 if report_fine_data[i][key][x_key] is not None else None + report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key] * \ + 100 if report_fine_data[i][key][x_key] is not None else None overview_filename = _subsidiary + "_" + duration + ".xlsx" data_workbook = dict2xlsx(report_fine_data, _type='report') - - folder_path = os.path.join(settings.MEDIA_ROOT, "report", settings.OVERVIEW_REPORT_ROOT) - os.makedirs(folder_path, exist_ok = True) + + folder_path = os.path.join( + settings.MEDIA_ROOT, "report", settings.OVERVIEW_REPORT_ROOT) + os.makedirs(folder_path, exist_ok=True) file_path = os.path.join(folder_path, overview_filename) data_workbook.save(file_path) - s3_key=save_report_to_S3(None, file_path) - # redis_client.set_cache(settings.OVERVIEW_REPORT_ROOT, overview_filename.replace(".xlsx", ""), json.dumps(save_data)) + s3_key = save_report_to_S3(None, file_path) + # redis_client.set_cache(settings.OVERVIEW_REPORT_ROOT, overview_filename.replace(".xlsx", ""), json.dumps(save_data)) else: # Retrive data from Redis - key = f"{subsidiary}_{duration}" + key = f"{subsidiary}_{duration}" data = get_cache(key).get("data", []) response = { 'overview_data': data, } return JsonResponse(response, status=200) return JsonResponse({'error': 'Invalid request method.'}, status=405) - + @extend_schema( - parameters=[ - OpenApiParameter( - name='duration', - location=OpenApiParameter.QUERY, - description='one of [30d, 7d]', - type=OpenApiTypes.STR, - default='30d', - ), - OpenApiParameter( - name='subsidiary', - location=OpenApiParameter.QUERY, - description='Subsidiary', - type=OpenApiTypes.STR, - ) - ], - responses=None, tags=['Accuracy'] + parameters=[ + OpenApiParameter( + name='duration', + location=OpenApiParameter.QUERY, + description='one of [30d, 7d]', + type=OpenApiTypes.STR, + default='30d', + ), + OpenApiParameter( + name='subsidiary', + location=OpenApiParameter.QUERY, + description='Subsidiary', + type=OpenApiTypes.STR, + ) + ], + responses=None, tags=['Accuracy'] ) 
@action(detail=False, url_path="overview_download_file", methods=["GET"]) def overview_download_file(self, request): @@ -626,20 +663,22 @@ class AccuracyViewSet(viewsets.ViewSet): tmp_file = "/tmp/" + s3_key os.makedirs("/tmp", exist_ok=True) - download_from_S3("report/" + settings.OVERVIEW_REPORT_ROOT + "/" + s3_key, tmp_file) + download_from_S3( + "report/" + settings.OVERVIEW_REPORT_ROOT + "/" + s3_key, tmp_file) file = open(tmp_file, 'rb') response = FileResponse(file, status=200) # Set the content type and content disposition headers response['Content-Type'] = 'application/octet-stream' - response['Content-Disposition'] = 'attachment; filename="{0}"'.format(os.path.basename(tmp_file)) + response['Content-Disposition'] = 'attachment; filename="{0}"'.format( + os.path.basename(tmp_file)) return response return JsonResponse({'error': 'Invalid request method.'}, status=405) @extend_schema( - parameters=[], - responses=None, tags=['Accuracy'] + parameters=[], + responses=None, tags=['Accuracy'] ) @action(detail=False, url_path=r"get_report_file/(?P[\w\-]+)", methods=["GET"]) def get_report_file(self, request, report_id): @@ -659,8 +698,10 @@ class AccuracyViewSet(viewsets.ViewSet): if not report.S3_dashboard_file_name and request.query_params["report_expression"] != "detail": raise NotFoundException(excArgs="S3 dashboard file name") - file_name = report.S3_file_name if request.query_params["report_expression"] == "detail" else report.S3_dashboard_file_name - tmp_file = "/tmp/" + request.query_params["report_expression"] + "_" + report.subsidiary + "_" + report.start_at.astimezone(target_timezone).strftime("%Y%m%d") + "_" + report.end_at.astimezone(target_timezone).strftime("%Y%m%d") + "_created_on_" + report.created_at.astimezone(target_timezone).strftime("%Y%m%d") + ".xlsx" + file_name = report.S3_file_name if request.query_params[ + "report_expression"] == "detail" else report.S3_dashboard_file_name + tmp_file = "/tmp/" + request.query_params["report_expression"] + "_" + report.subsidiary + "_" + report.start_at.astimezone(target_timezone).strftime( + "%Y%m%d") + "_" + report.end_at.astimezone(target_timezone).strftime("%Y%m%d") + "_created_on_" + report.created_at.astimezone(target_timezone).strftime("%Y%m%d") + ".xlsx" os.makedirs("/tmp", exist_ok=True) download_from_S3(file_name, tmp_file) @@ -669,7 +710,8 @@ class AccuracyViewSet(viewsets.ViewSet): # Set the content type and content disposition headers response['Content-Type'] = 'application/octet-stream' - response['Content-Disposition'] = 'attachment; filename="{0}"'.format(os.path.basename(tmp_file)) + response['Content-Disposition'] = 'attachment; filename="{0}"'.format( + os.path.basename(tmp_file)) return response return JsonResponse({'error': 'Invalid request method.'}, status=405) @@ -694,11 +736,12 @@ class AccuracyViewSet(viewsets.ViewSet): if request.method == 'GET': base_query = Q(request_id=request_id) - subscription_request = SubscriptionRequest.objects.filter(base_query) + subscription_request = SubscriptionRequest.objects.filter( + base_query) if subscription_request.count() == 0: raise NotFoundException(excArgs=request_id) - + subscription_request = subscription_request.first() sample_result = { @@ -709,11 +752,12 @@ class AccuracyViewSet(viewsets.ViewSet): "purchase_date": None, "imei_number": [] } - + data = [] files = [] - subscription_request_files = SubscriptionRequestFile.objects.filter(request=subscription_request.id, file_category=FileCategory.Origin.value) + subscription_request_files = 
SubscriptionRequestFile.objects.filter( + request=subscription_request.id, file_category=FileCategory.Origin.value) for subscription_request_file in subscription_request_files: sub = subscription_request.subscription @@ -726,10 +770,12 @@ class AccuracyViewSet(viewsets.ViewSet): if not reviewed_result: reviewed_result = copy.deepcopy(sample_result) - reviewed_result["imei_number"] = [None for _ in range(subscription_request.doc_type.split(",").count("imei"))] + reviewed_result["imei_number"] = [None for _ in range( + subscription_request.doc_type.split(",").count("imei"))] if not feedback_result: feedback_result = copy.deepcopy(sample_result) - feedback_result["imei_number"] = [None for _ in range(subscription_request.doc_type.split(",").count("imei"))] + feedback_result["imei_number"] = [None for _ in range( + subscription_request.doc_type.split(",").count("imei"))] if not predicted_result: predicted_result = copy.deepcopy(sample_result) @@ -755,14 +801,17 @@ class AccuracyViewSet(viewsets.ViewSet): reviewed_result = subscription_request.reviewed_result feedback_result = subscription_request.feedback_result - predicted_result = predict_result_to_ready(subscription_request.predict_result) + predicted_result = predict_result_to_ready( + subscription_request.predict_result) if not reviewed_result: reviewed_result = copy.deepcopy(sample_result) - reviewed_result["imei_number"] = [None for _ in range(subscription_request.doc_type.split(",").count("imei"))] + reviewed_result["imei_number"] = [None for _ in range( + subscription_request.doc_type.split(",").count("imei"))] if not feedback_result: feedback_result = copy.deepcopy(sample_result) - feedback_result["imei_number"] = [None for _ in range(subscription_request.doc_type.split(",").count("imei"))] + feedback_result["imei_number"] = [None for _ in range( + subscription_request.doc_type.split(",").count("imei"))] if not predicted_result: predicted_result = copy.deepcopy(sample_result) @@ -782,7 +831,7 @@ class AccuracyViewSet(viewsets.ViewSet): 'Server Processing Time (ms)': subscription_request.preprocessing_time + subscription_request.ai_inference_time, 'Is Reviewed': subscription_request.is_reviewed, 'Feedback Accuracy': subscription_request.feedback_accuracy, - 'Reviewed Accuracy': subscription_request.reviewed_accuracy, + 'Reviewed Accuracy': subscription_request.reviewed_accuracy, 'Created At': subscription_request.created_at.isoformat(), 'Updated At': subscription_request.updated_at.isoformat(), 'raw_accuracy': subscription_request.raw_accuracy*100 if isinstance(subscription_request.raw_accuracy, float) else None, @@ -793,31 +842,36 @@ class AccuracyViewSet(viewsets.ViewSet): } return JsonResponse(response) - + elif request.method == 'POST': data = request.data base_query = Q(request_id=request_id) - subscription_request = SubscriptionRequest.objects.filter(base_query) + subscription_request = SubscriptionRequest.objects.filter( + base_query) if subscription_request.count() == 0: raise NotFoundException(excArgs=request_id) - + subscription_request = subscription_request.first() - subscription_request_files = SubscriptionRequestFile.objects.filter(request=subscription_request.id) + subscription_request_files = SubscriptionRequestFile.objects.filter( + request=subscription_request.id) if "reviewed_result" not in data: raise InvalidException(excArgs=f'reviewed_result') - + reviewed_result = data["reviewed_result"] if not subscription_request.predict_result: raise InvalidException(excArgs=f'request_id') - validate_review(reviewed_result, 
len(subscription_request.predict_result.get("content", {}).get("document", [{}])[0].get("content", [{}, {}, {}, {}])[3].get("value", []))) + validate_review(reviewed_result, len(subscription_request.predict_result.get( + "content", {}).get("document", [{}])[0].get("content", [{}, {}, {}, {}])[3].get("value", []))) reviewed_result['request_id'] = request_id for subscription_request_file in subscription_request_files: if subscription_request_file.doc_type == 'invoice': - subscription_request_file.reviewed_result = copy.deepcopy(reviewed_result) - subscription_request_file.reviewed_result['imei_number'] = [] + subscription_request_file.reviewed_result = copy.deepcopy( + reviewed_result) + subscription_request_file.reviewed_result['imei_number'] = [ + ] elif subscription_request_file.doc_type == 'imei': subscription_request_file.reviewed_result = { "retailername": None, @@ -825,8 +879,9 @@ class AccuracyViewSet(viewsets.ViewSet): "invoice_no": None, "purchase_date": [], "imei_number": []} - if len(reviewed_result["imei_number"]) - 1 >= subscription_request_file.index_in_request: - subscription_request_file.reviewed_result["imei_number"] = reviewed_result["imei_number"][subscription_request_file.index_in_request] + if len(reviewed_result["imei_number"])-1 >= subscription_request_file.index_in_request: + subscription_request_file.reviewed_result["imei_number"] = [reviewed_result[ + "imei_number"][subscription_request_file.index_in_request]] subscription_request_file.save() subscription_request.reviewed_result = reviewed_result @@ -837,7 +892,7 @@ class AccuracyViewSet(viewsets.ViewSet): return JsonResponse({'message': 'success.'}, status=200) else: return JsonResponse({'error': 'Invalid request method.'}, status=405) - + @extend_schema( request={ 'multipart/form-data': { @@ -861,23 +916,25 @@ class AccuracyViewSet(viewsets.ViewSet): def request_image(self, request, request_id=None, request_image_id=None): if request.method == 'POST': data = request.data - + base_query = Q(request_id=request_id) - subscription_request = SubscriptionRequest.objects.filter(base_query) + subscription_request = SubscriptionRequest.objects.filter( + base_query) if subscription_request.count() == 0: raise NotFoundException(excArgs=request_id) - + subscription_request = subscription_request.first() - subscription_request_files = SubscriptionRequestFile.objects.filter(request=subscription_request.id) + subscription_request_files = SubscriptionRequestFile.objects.filter( + request=subscription_request.id) if "reason" not in data: raise InvalidException(excArgs=f'reason') if "solution" not in data: raise InvalidException(excArgs=f'solution') - + reason = data["reason"] solution = data["solution"] @@ -889,7 +946,8 @@ class AccuracyViewSet(viewsets.ViewSet): subscription_request_file.save() is_available = True if not is_available: - raise NotFoundException(excArgs=request_id + "/" + request_image_id) + raise NotFoundException( + excArgs=request_id + "/" + request_image_id) return JsonResponse({'message': 'success.'}, status=200) else: diff --git a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py index 5ffd09b..c738c82 100755 --- a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py +++ b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py @@ -110,7 +110,7 @@ def create_accuracy_report(report_id, **kwargs): if request.status != 200 or not (request.reviewed_result or request.feedback_result): # Failed requests or lack of 
reviewed_result/feedback_result continue - request_att, _report_files, _att = calculate_a_request(report, request) + request_att, _report_files, _atts = calculate_a_request(report, request) report_files += _report_files report_engine.add(request, _report_files, report) request.feedback_accuracy = {"imei_number": mean_list(request_att["acc"]["feedback"].get("imei_number", [None])), @@ -125,10 +125,11 @@ def create_accuracy_report(report_id, **kwargs): "invoice_no": mean_list(request_att["acc"]["reviewed"].get("invoice_no", [None]))} rq_accuracy = {"feedback": [], "reviewed": []} - - for t in _att["acc"].keys(): - for cl in _att["acc"][t].keys(): - rq_accuracy[t] += _att["acc"][t][cl] + + for _att in _atts: + for t in _att["acc"].keys(): + for cl in _att["acc"][t].keys(): + rq_accuracy[t] += _att["acc"][t][cl] # for rpf in _report_files: # for cl in rpf.feedback_accuracy.keys(): diff --git a/cope2n-api/fwd_api/utils/accuracy.py b/cope2n-api/fwd_api/utils/accuracy.py index 5d7383f..1d5e938 100755 --- a/cope2n-api/fwd_api/utils/accuracy.py +++ b/cope2n-api/fwd_api/utils/accuracy.py @@ -797,7 +797,7 @@ def calculate_a_request(report, request): 0: "No", 1: "Yes"} return review_status.get(input, "N/A") - + atts = [] request_att = {"acc": {"feedback": {"imei_number": [], "purchase_date": [], "retailername": [], @@ -829,6 +829,7 @@ def calculate_a_request(report, request): for image in images: status, att = calculate_subcription_file(image) + atts.append(att) att["acc"]["feedback"], fb_max_indexes = acc_maximize_list_values(att["acc"]["feedback"]) att["acc"]["reviewed"], rv_max_indexes = acc_maximize_list_values(att["acc"]["reviewed"]) @@ -945,7 +946,7 @@ def calculate_a_request(report, request): print(f"[ERROR]: failed to calculate request: {request.request_id} - request_file: {image.file_name} because of {e}") continue - return request_att, report_files, att + return request_att, report_files, atts def calculate_subcription_file(subcription_request_file): att = {"acc": {"feedback": {}, diff --git a/cope2n-fe/package.json b/cope2n-fe/package.json index 7798b18..447642a 100644 --- a/cope2n-fe/package.json +++ b/cope2n-fe/package.json @@ -44,6 +44,7 @@ "pdfjs-dist": "^3.11.174", "process": "^0.11.10", "react": "^18.2.0", + "react-awesome-lightbox": "^1.8.1", "react-chartjs-2": "^5.2.0", "react-dom": "^18.2.0", "react-hotkeys-hook": "^4.5.0", @@ -51,6 +52,7 @@ "react-office-viewer": "^1.0.4", "react-router-dom": "^6.6.1", "styled-components": "^5.3.6", + "ts-node": "^10.9.2", "uuid": "^9.0.0" }, "devDependencies": { diff --git a/cope2n-fe/src/layouts/main-layout.tsx b/cope2n-fe/src/layouts/main-layout.tsx index c77c5e0..eee7337 100644 --- a/cope2n-fe/src/layouts/main-layout.tsx +++ b/cope2n-fe/src/layouts/main-layout.tsx @@ -119,7 +119,7 @@ export const MainLayout = ({ children }: { children: React.ReactNode }) => { style={{ height: '100%', overflow: 'auto', - padding: 32, + padding: 16, background: colorBgContainer, }} > diff --git a/cope2n-fe/src/pages/reviews/FileCard.tsx b/cope2n-fe/src/pages/reviews/FileCard.tsx index 363e5c6..1c3fd82 100644 --- a/cope2n-fe/src/pages/reviews/FileCard.tsx +++ b/cope2n-fe/src/pages/reviews/FileCard.tsx @@ -31,6 +31,7 @@ const FileCard = ({ file, isSelected, onClick, setIsReasonModalOpen }) => { > {file['Doc Type'].toUpperCase()} +
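Reviewer note on the `process_report_tasks.py` and `accuracy.py` hunks above: they fix a stale-variable bug. `calculate_a_request` used to return only the `att` dict of the last image in its loop, so the request-level accuracy ignored every other file in the request; it now appends each image's dict to `atts` and returns the whole list, and `create_accuracy_report` accumulates over all of them. A minimal sketch of the corrected accumulation, with illustrative data shapes:

```python
# Sketch of the corrected accumulation in create_accuracy_report: every
# per-image `att` dict contributes to the request-level accuracy lists,
# instead of only the last image as before. Shapes are illustrative.
def collect_request_accuracy(atts):
    rq_accuracy = {"feedback": [], "reviewed": []}
    for att in atts:                  # one entry per image in the request
        for t in att["acc"]:          # "feedback" / "reviewed"
            for cl in att["acc"][t]:  # field class, e.g. "imei_number"
                rq_accuracy[t] += att["acc"][t][cl]
    return rq_accuracy

atts = [
    {"acc": {"feedback": {"imei_number": [1.0]}, "reviewed": {"imei_number": [0.9]}}},
    {"acc": {"feedback": {"imei_number": [0.5]}, "reviewed": {"imei_number": [1.0]}}},
]
print(collect_request_accuracy(atts))
# {'feedback': [1.0, 0.5], 'reviewed': [0.9, 1.0]}
```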
{ + const fileName = file['File Name']; + + return ( +
+
+

+ {file['Doc Type'].toUpperCase()} +

+ + {fileName ? fileName.substring(0, 25).replace('temp_', '') : fileName} + +
+
+ + +
+
+ ); +}; + +export default FileCard; diff --git a/cope2n-fe/src/pages/reviews2/api.ts b/cope2n-fe/src/pages/reviews2/api.ts new file mode 100644 index 0000000..a8c1885 --- /dev/null +++ b/cope2n-fe/src/pages/reviews2/api.ts @@ -0,0 +1,79 @@ +import { baseURL } from 'request/api'; + +export const fetchAllRequests = async ( + filterDateRange, + filterSubsidiaries, + filterReviewState, + filterIncludeTests, + page = 1, + page_size = 20, + max_accuracy = 100 +) => { + const startDate = + filterDateRange && filterDateRange[0] ? filterDateRange[0] : ''; + const endDate = + filterDateRange && filterDateRange[1] ? filterDateRange[1] : ''; + let filterStr = ''; + filterStr += `page=${page}&page_size=${page_size}&`; + if (filterSubsidiaries) { + filterStr += `subsidiary=${filterSubsidiaries}&`; + } + if (filterReviewState) { + filterStr += `is_reviewed=${filterReviewState}&`; + } + if (filterIncludeTests) { + filterStr += `includes_test=${filterIncludeTests}&`; + } + if (startDate && endDate) { + filterStr += `start_date=${startDate}&end_date=${endDate}&`; + } + filterStr += `max_accuracy=${max_accuracy}` + const token = localStorage.getItem('sbt-token') || ''; + const data = await fetch(`${baseURL}/ctel/request_list/?${filterStr}`, { + method: 'GET', + headers: { + Authorization: `${JSON.parse(token)}`, + }, + }).then(async (res) => { + const data = await res.json(); + return data; + }); + return data; +}; + +export const updateRevisedData = async ( + requestID: any, + newRevisedData: any, +) => { + // const requestID = ; + const token = localStorage.getItem('sbt-token') || ''; + const result = await fetch(`${baseURL}/ctel/request/${requestID}/`, { + method: 'POST', + headers: { + Authorization: `${JSON.parse(token)}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + reviewed_result: newRevisedData, + }), + }).catch((error) => { + console.log(error); + throw error; + }); + if (result.status != 200) { + throw new Error('Could not update revised data'); + } +}; + +export const fetchRequest = async (id) => { + const token = localStorage.getItem('sbt-token') || ''; + const response = await fetch(`${baseURL}/ctel/request/${id}/`, { + method: 'GET', + headers: { + Authorization: `${JSON.parse(token)}`, + }, + }); + return await ( + await response.json() + ).subscription_requests[0]; +}; diff --git a/cope2n-fe/src/pages/reviews2/const.ts b/cope2n-fe/src/pages/reviews2/const.ts new file mode 100644 index 0000000..e4b1f1d --- /dev/null +++ b/cope2n-fe/src/pages/reviews2/const.ts @@ -0,0 +1,49 @@ +import { t } from "@lingui/macro"; + +export const counter_measure_map = { + invalid_image: 'Remove this image from the evaluation report', + missing_information: 'Remove this image from the evaluation report', + too_blurry_text: 'Remove this image from the evaluation report', + too_small_text: 'Remove this image from the evaluation report', + ocr_cannot_extract: 'Improve OCR', + wrong_feedback: 'Update revised result and re-calculate accuracy', + handwritten: 'Remove this image from the evaluation report', + other: 'other', +}; + +export const REASON_BAD_QUALITY = [ + { value: 'invalid_image', label: t`Invalid image` }, + { + value: 'missing_information', + label: t`Missing information`, + }, + { value: 'too_blurry_text', label: t`Too blurry text` }, + { value: 'too_small_text', label: t`Too small text` }, + { value: 'handwritten', label: t`Handwritten` }, + { value: 'wrong_feedback', label: t`Wrong Feedback` }, + { value: 'ocr_cannot_extract', label: t`Ocr cannot extract` }, + { value: 
'other', label: t`Other` }, +] + +export const SOLUTION_BAD_QUALITY =[ + { + value: 'Remove this image from the evaluation report', + label: t`Remove this image from the evaluation report`, + }, + { value: 'Improve OCR', label: t`Improve OCR` }, + { + value: 'Update revised result and re-calculate accuracy', + label: t`Update revised result and re-calculate accuracy`, + }, + { value: 'other', label: t`Other` }, +] + +export const SUBSIDIARIES = [ + { value: 'SEAO', label: 'SEAO' }, + { value: 'SEAU', label: 'SEAU' }, + { value: 'SESP', label: 'SESP' }, + { value: 'SME', label: 'SME' }, + { value: 'SEPCO', label: 'SEPCO' }, + { value: 'TSE', label: 'TSE' }, + { value: 'SEIN', label: 'SEIN' }, +] \ No newline at end of file diff --git a/cope2n-fe/src/pages/reviews2/index.tsx b/cope2n-fe/src/pages/reviews2/index.tsx new file mode 100644 index 0000000..46a11bf --- /dev/null +++ b/cope2n-fe/src/pages/reviews2/index.tsx @@ -0,0 +1,1009 @@ +import { + ArrowLeftOutlined, + ArrowRightOutlined, + CheckCircleOutlined, + ClockCircleFilled, + CopyOutlined, + FullscreenExitOutlined, + FullscreenOutlined, +} from '@ant-design/icons'; +import { t } from '@lingui/macro'; +import { Viewer } from '@react-pdf-viewer/core'; +import { + Button, + DatePicker, + Form, + Input, + InputNumber, + message, + Modal, + notification, + Select, + Spin, + Tag, +} from 'antd'; +import { useEffect, useState } from 'react'; +import Lightbox from 'react-awesome-lightbox'; +import 'react-awesome-lightbox/build/style.css'; +import { useHotkeys } from 'react-hotkeys-hook'; +import { baseURL } from 'request/api'; +// Import the styles +import '@react-pdf-viewer/core/lib/styles/index.css'; + +import { badQualityReasonSubmit } from 'request'; +import { normalizeData } from 'utils/field-value-process'; +import { fetchAllRequests, fetchRequest } from './api'; +import { + counter_measure_map, + REASON_BAD_QUALITY, + SOLUTION_BAD_QUALITY, + SUBSIDIARIES, +} from './const'; +import FileCard from './FileCard'; + +const ReviewPage = () => { + const [loading, setLoading] = useState(false); + const [fullscreen, setFullscreen] = useState(false); + const [isModalOpen, setIsModalOpen] = useState(false); + const [isReasonModalOpen, setIsReasonModalOpen] = useState(false); + const [selectedFileId, setSelectedFileId] = useState(0); + const [selectedFileData, setSelectedFileData] = useState(null); + const [selectedFileName, setSelectedFileName] = useState(null); + + // Default date range: 1 month ago to today + const [filterDateRange, setFilterDateRange] = useState(['', '']); + + const [filterSubsidiaries, setFilterSubsidiaries] = useState('SEAO'); + const [filterAccuracy, setFilterAccuracy] = useState(100); + const [filterReviewState, setFilterReviewState] = useState('all'); + const [filterIncludeTests, setFilterIncludesTests] = useState('true'); + // const [requests, setRequests] = useState([]); + const [currentRequest, setCurrentRequest] = useState(null); + const [currentRequestIndex, setCurrentRequestIndex] = useState(1); + const [hasNextRequest, setHasNextRequest] = useState(true); + const [totalRequests, setTotalPages] = useState(0); + const [dataSource, setDataSource] = useState([]); + + const [pageIndexToGoto, setPageIndexToGoto] = useState(1); + + const [reason, setReason] = useState(''); + const [otherReason, setOtherReason] = useState(''); + const [solution, setSolution] = useState(''); + const [otherSolution, setOtherSolution] = useState(''); + const [imageLoading, setImageLoading] = useState(false); + + useEffect(() => { + if (reason) 
{ + setSolution(counter_measure_map[reason]); + } + }, [reason]); + + const setAndLoadSelectedFile = async (requestData, index) => { + setSelectedFileId(index); + if (!requestData['Files'][index]) { + setSelectedFileData('FAILED_TO_LOAD_FILE'); + setImageLoading(false); + return; + } + const fileName = requestData['Files'][index]['File Name']; + const fileURL = requestData['Files'][index]['File URL']; + const response = await fetch(fileURL); + if (response.status === 200) { + setSelectedFileName(fileName); + setSelectedFileData(fileURL); + console.log('Loading file: ' + fileName); + console.log('URL: ' + fileURL); + } else { + setSelectedFileData('FAILED_TO_LOAD_FILE'); + setImageLoading(false); + } + }; + + console.log(dataSource); + const loadCurrentRequest = (requestIndex) => { + setLoading(true); + setImageLoading(true); + fetchAllRequests( + filterDateRange, + filterSubsidiaries, + filterReviewState, + filterIncludeTests, + requestIndex, + 1, + filterAccuracy, + ) + .then((data) => { + // setRequests(data?.subscription_requests); + // setHasNextRequest(data?.subscription_requests.length > 1); + setTotalPages(data?.page?.total_requests); + setHasNextRequest(requestIndex < data?.page?.total_requests); + const requestData = fetchRequest( + data?.subscription_requests[0].RequestID, + ); + requestData + .then(async (data) => { + console.log('🚀 ~ .then ~ data:', data); + if (data) setCurrentRequest(data); + const predicted = + data && data['Predicted Result'] ? data['Predicted Result'] : {}; + const submitted = + data && data['Feedback Result'] ? data['Feedback Result'] : {}; + const revised = + data && data['Reviewed Result'] ? data['Reviewed Result'] : {}; + const keys = Object.keys(predicted); + const tableRows = []; + for (let i = 0; i < keys.length; i++) { + let instance = {}; + instance['key'] = keys[i]; + instance['predicted'] = predicted[keys[i]]; + instance['submitted'] = submitted[keys[i]]; + instance['revised'] = revised[keys[i]]; + tableRows.push(instance); + } + setDataSource(tableRows); + setAndLoadSelectedFile(data, 0); + }) + .finally(() => { + setLoading(false); + }); + }) + .finally(() => { + setLoading(false); + }); + }; + + const gotoNextRequest = () => { + if (currentRequestIndex >= totalRequests) { + return; + } + const nextRequestIndex = currentRequestIndex + 1; + setCurrentRequestIndex(nextRequestIndex); + loadCurrentRequest(nextRequestIndex); + }; + + const gotoPreviousRequest = () => { + if (currentRequestIndex === 1) { + return; + } + const previousRequestIndex = currentRequestIndex - 1; + setCurrentRequestIndex(previousRequestIndex); + loadCurrentRequest(previousRequestIndex); + }; + + const reloadFilters = () => { + setCurrentRequestIndex(1); + fetchAllRequests( + filterDateRange, + filterSubsidiaries, + filterReviewState, + filterIncludeTests, + 1, + 1, + filterAccuracy, + ).then((data) => { + setTotalPages(data?.page?.total_requests); + // setRequests(data?.subscription_requests); + // setHasNextRequest(data?.subscription_requests.length > 1); + setHasNextRequest(1 < data?.page?.total_requests); + const firstRequest = fetchRequest( + data?.subscription_requests[0].RequestID, + ); + firstRequest.then(async (data) => { + if (data) setCurrentRequest(data); + setAndLoadSelectedFile(data, 0); + setTimeout(() => { + loadCurrentRequest(1); + }, 500); + }); + }); + }; + + useEffect(() => { + setCurrentRequestIndex(1); + fetchAllRequests( + filterDateRange, + filterSubsidiaries, + filterReviewState, + filterIncludeTests, + 1, + 1, + filterAccuracy, + ).then((data) => { 
+ setTotalPages(data?.page?.total_requests); + // setRequests(data?.subscription_requests); + setHasNextRequest(1 < data?.page?.total_requests); + const firstRequest = fetchRequest( + data?.subscription_requests[0].RequestID, + ); + firstRequest.then(async (data) => { + if (data) setCurrentRequest(data); + setAndLoadSelectedFile(data, 0); + }); + }); + }, []); + + // "Key", "Accuracy", "Submitted", "Revised" + interface DataType { + key: string; + accuracy: number; + submitted: string; + revised: string; + } + + const updateRevisedData = async (newRevisedData: any) => { + const requestID = currentRequest.RequestID; + const token = localStorage.getItem('sbt-token') || ''; + const result = await fetch(`${baseURL}/ctel/request/${requestID}/`, { + method: 'POST', + headers: { + Authorization: `${JSON.parse(token)}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + reviewed_result: newRevisedData, + }), + }).catch((error) => { + console.log(error); + throw error; + }); + if (result.status != 200) { + throw new Error('Could not update revised data'); + } + }; + + const handleSave = (row: DataType) => { + const newData = [...dataSource]; + const index = newData.findIndex((item) => row.key === item.key); + const item = newData[index]; + newData.splice(index, 1, { + ...item, + ...row, + }); + const newRevisedData = {}; + for (let i = 0; i < newData.length; i++) { + newData[i].revised = normalizeData(newData[i].key, newData[i].revised); + newRevisedData[newData[i].key] = newData[i].revised; + } + updateRevisedData(newRevisedData) + .then(() => { + // "[Is Reviewed]" => true + setCurrentRequest({ + ...currentRequest, + ['Is Reviewed']: true, + }); + }) + .then(() => { + setDataSource(newData); + }) + .catch((error) => { + message.error( + 'Could not update revised data. Please check the format.', + ); + }); + }; + + const submitRevisedData = async () => { + const newData = [...dataSource]; + const newRevisedData = {}; + for (let i = 0; i < newData.length; i++) { + newData[i].revised = normalizeData(newData[i].key, newData[i].revised); + newRevisedData[newData[i].key] = newData[i].revised; + } + updateRevisedData(newRevisedData).then(() => { + // "[Is Reviewed]" => true + setCurrentRequest({ + ...currentRequest, + ['Is Reviewed']: true, + }); + }); + }; + + // use left/right keys to navigate + useHotkeys('left', gotoPreviousRequest); + useHotkeys('right', gotoNextRequest); + + const fileExtension = selectedFileName + ? selectedFileName.split('.').pop() + : ''; + + const [lightBox, setLightBox] = useState(false); + + return ( +
+
+ +
+
+
+ + {totalRequests ? ( + <> +    Request ID: {currentRequest?.RequestID} + + ) : ( + '' + )} +
+
+ + +
+
+
+
+ {totalRequests > 0 && ( +
+
+

+ Files ({currentRequest?.Files?.length}) +

+ {currentRequest?.Files.map((file, index) => ( + { + setAndLoadSelectedFile(currentRequest, index); + setImageLoading(true); + }} + setIsReasonModalOpen={setIsReasonModalOpen} + /> + ))} +
+ {totalRequests > 0 && ( +
+ Request ID + + Redemption + + Created date + + Request time + + Processing time + + Raw accuracy + +
+ )} +
+ )} +
+
+
+ + {selectedFileData === 'FAILED_TO_LOAD_FILE' ? ( +

Failed to load file.

+ ) : fileExtension === 'pdf' ? ( + + ) : ( + <> + file setLightBox(true)} + onLoad={() => { + setImageLoading(false); + }} + /> + + {lightBox && ( + setLightBox(false)} + > + )} + + )} +
+
+ +
+
+

+ {totalRequests + ? 'No: ' + currentRequestIndex + '/' + totalRequests + : 'No Request. Adjust your search criteria to see more results.'} +

+ {currentRequest && + (currentRequest['Is Reviewed'] ? ( + } + color='success' + style={{ padding: '4px 16px' }} + > + Reviewed + + ) : ( + } + color='warning' + style={{ padding: '4px 16px' }} + > + Not Reviewed + + ))} +
+
+
+ + + { + if (pageIndexToGoto > totalRequests) { + message.error('RequestID is out of range.'); + return; + } + if (pageIndexToGoto < 1) { + message.error('RequestID is out of range.'); + return; + } + setCurrentRequestIndex(pageIndexToGoto); + loadCurrentRequest(pageIndexToGoto); + }} + > + Go to + + } + value={pageIndexToGoto} + onChange={(e) => { + setPageIndexToGoto(parseInt(e.target.value)); + }} + /> +
+
+
+
+
+
+ {dataSource?.map((data) => { + return ( +
+
+

{data.key}

+
+ + + +
+ ); + })} +
+ +
+
+ { + setIsModalOpen(false); + reloadFilters(); + }} + onCancel={() => { + setIsModalOpen(false); + }} + > +
+ + { + setFilterDateRange(dateString); + }} + style={{ width: 200 }} + /> + + +
+ + + + + + + + {reason === 'other' && ( + { + setOtherReason(e.target.value); + }} + style={{ + width: 200, + marginTop: 30, + marginBottom: 24, + marginLeft: 10, + }} + /> + )} +
+
+
+ + + {counter_measure_map[reason]} + + { + setOtherSolution(e.target.value); + }} + style={{ + width: 200, + marginBottom: 24, + marginLeft: 10, + }} + /> + )} +
+
+
+ ); +}; + +export default ReviewPage; diff --git a/cope2n-fe/src/request/api.ts b/cope2n-fe/src/request/api.ts index 2eecbfb..36cf3c5 100644 --- a/cope2n-fe/src/request/api.ts +++ b/cope2n-fe/src/request/api.ts @@ -11,7 +11,7 @@ const environment = process.env.NODE_ENV; const AXIOS_TIMEOUT_MS = 30 * 60 * 1000; // This config sastified long-live upload file request const EXPIRED_PASSWORD_SIGNAL = 'expired_password'; -export const baseURL = environment === 'development' ? 'http://107.120.133.27:9881/api' : '/api'; +export const baseURL = environment === 'development' ? 'http://107.120.133.27:9000/api' : '/api'; // export const baseURL = '/api'; diff --git a/cope2n-fe/src/routes/useAppRouter.tsx b/cope2n-fe/src/routes/useAppRouter.tsx index 3e1419b..899824f 100644 --- a/cope2n-fe/src/routes/useAppRouter.tsx +++ b/cope2n-fe/src/routes/useAppRouter.tsx @@ -12,6 +12,7 @@ const DashboardPage = React.lazy(() => import('pages/dashboard')); const InferencePage = React.lazy(() => import('pages/inference/index')); const ReviewsPage = React.lazy(() => import('pages/reviews')); +const ReviewsPage2 = React.lazy(() => import('pages/reviews2')); const ReportsPage = React.lazy(() => import('pages/reports')); const ReportDetailPage = React.lazy( () => import('pages/reports/report_detail'), @@ -65,6 +66,11 @@ export function useAppRouter() { path: '/reviews', element: } />, }, + { + path: '/reviews2', + element: } />, + }, + { path: '/users', element: } />, diff --git a/cope2n-fe/src/utils/field-value-process.ts b/cope2n-fe/src/utils/field-value-process.ts index 69ab9d5..6df0735 100644 --- a/cope2n-fe/src/utils/field-value-process.ts +++ b/cope2n-fe/src/utils/field-value-process.ts @@ -21,6 +21,9 @@ const normalizeData = (key, value) => { if (["imei_number", "purchase_date"].includes(key) && typeof(value) === "string") { value = value.split(","); } + if(key === 'imei_number' && value === null){ + value = [null] + } if (typeof (value) === "object" && value?.length > 0) { for (let i = 0; i < value.length; i++) { value[i] = normalizeData("text", value[i]);