Fix issues on 29 Feb

This commit is contained in:
dx-tan 2024-02-28 18:45:10 +07:00
parent 61a9350e5d
commit e1af147f32
15 changed files with 307 additions and 67 deletions

View File

@ -237,6 +237,9 @@ SUBS = {
"SEAO": "seao"
}
BAD_THRESHOLD = 0.75
NEED_REVIEW = 1.0
SUB_FOR_BILLING = ["all", "seao"]
CACHES = {

View File

@ -505,6 +505,7 @@ class AccuracyViewSet(viewsets.ViewSet):
# aggregate_overview from subsidiaries
subsidiaries_to_include = list(settings.SUBS.values())
subsidiaries_to_include.remove("all")
subsidiaries_to_include.remove("seao")
subsidiary_overview_reports = []
for sub in subsidiaries_to_include:
key = f"{sub}_{duration}"

View File

@ -119,7 +119,7 @@ def process_csv_feedback(csv_file_path, feedback_id):
continue
_predict_result = copy.deepcopy(predict_result_to_ready(sub_rq.predict_result))
_feedback_result = copy.deepcopy(sub_rq.feedback_result)
_reviewed_result = copy.deepcopy(sub_rq.reviewed_result)
# _reviewed_result = copy.deepcopy(sub_rq.reviewed_result)
try:
image.processing_time = time_cost.get(image.doc_type, [0 for _ in range(image.index_in_request)])[image.index_in_request]
except Exception as e:
@ -132,19 +132,19 @@ def process_csv_feedback(csv_file_path, feedback_id):
if _feedback_result:
_feedback_result["imei_number"] = []
if _reviewed_result:
_reviewed_result["imei_number"] = []
# if _reviewed_result:
# _reviewed_result["imei_number"] = []
else:
try:
_predict_result = {"retailername": None, "sold_to_party": None, "purchase_date": [], "imei_number": [_predict_result["imei_number"][image.index_in_request]]}
_feedback_result = {"retailername": None, "sold_to_party": None, "purchase_date": None, "imei_number": [_feedback_result["imei_number"][image.index_in_request]]} if _feedback_result else None
_reviewed_result = {"retailername": None, "sold_to_party": None, "purchase_date": None, "imei_number": [_reviewed_result["imei_number"][image.index_in_request]]} if _reviewed_result else None
# _reviewed_result = {"retailername": None, "sold_to_party": None, "purchase_date": None, "imei_number": [_reviewed_result["imei_number"][image.index_in_request]]} if _reviewed_result else None
except Exception as e:
print (f"[ERROR]: {request_id} - {e}")
image.predict_result = _predict_result
image.feedback_result = _feedback_result
image.reviewed_result = _reviewed_result
# image.reviewed_result = _reviewed_result
image.save()
# update log into database
feedback_rq = FeedbackRequest.objects.filter(feedback_id=feedback_id).first()

View File

@ -208,6 +208,7 @@ def make_a_report_2(report_id, query_set):
bad_image_list = []
number_images = 0
number_bad_images = 0
review_process = []
# TODO: Multithreading
# Calculate accuracy, processing time, ....Then save.
subscription_requests = SubscriptionRequest.objects.filter(base_query).order_by('created_at')
@ -245,6 +246,7 @@ def make_a_report_2(report_id, query_set):
errors += request_att["err"]
num_request += 1
review_process += request_att.get("is_reviewed", [])
report_fine_data, _save_data = report_engine.save(report.report_id, query_set.get("is_daily_report", False), query_set["include_test"])
transaction_att = count_transactions(start_date, end_date, report.subsidiary)
@ -278,6 +280,10 @@ def make_a_report_2(report_id, query_set):
report.reviewed_accuracy = acumulated_acc["reviewed"]
report.combined_accuracy = acumulated_acc["acumulated"]
report.num_reviewed = review_process.count(1)
report.num_not_reviewed = review_process.count(0)
report.num_no_reviewed = review_process.count(-1)
report.errors = "|".join(errors)
report.status = "Ready"
report.save()
@ -292,7 +298,9 @@ def make_a_report_2(report_id, query_set):
# Save overview dashboard
# multiple accuracy by 100
save_data = copy.deepcopy(_save_data)
review_key = "review_process"
for i, dat in enumerate(report_fine_data):
report_fine_data[i][review_key] = report_fine_data[i][review_key]*100
keys = [x for x in list(dat.keys()) if "accuracy" in x.lower()]
keys_percent = "images_quality"
for x_key in report_fine_data[i][keys_percent].keys():

View File

@ -1,4 +1,5 @@
# myapp/management/commands/mycustomcommand.py
from io import StringIO
from django.core.management.base import BaseCommand
from tqdm import tqdm
from fwd_api.models import SubscriptionRequestFile, SubscriptionRequest
@ -7,6 +8,12 @@ import traceback
import copy
import csv
PREDICT_INDEX = 3
FEEDBACK_INDEX = 2
REVIEWED_INDEX = 4
REASON_INDEX = 6
COUNTER_INDEX = 9
class Command(BaseCommand):
help = 'Refactor database for image level'
@ -14,9 +21,10 @@ class Command(BaseCommand):
# Add your command-line arguments here
parser.add_argument('test', type=str, help='Value for the argument')
def process_request(self, request, predict_result, user_feedback, reviewed_result, reason):
def process_request(self, total, failed, request, predict_result, user_feedback, reviewed_result, reason, counter):
if len(request.request_id.split(".")[0].split("_")) < 2:
return
total[0] += 1
request_review = copy.deepcopy(request.reviewed_result)
@ -74,7 +82,10 @@ class Command(BaseCommand):
"imei_number": [reviewed_result]
} if _reviewed_result else None
image.reviewed_result = _reviewed_result
if reason:
image.reason = reason
if counter:
image.counter_measures = counter
image.save()
request.reviewed_result = request_review
request.reviewed_result["request_id"] = request.request_id
@ -83,13 +94,17 @@ class Command(BaseCommand):
except Exception as e:
self.stdout.write(self.style.ERROR(f"Request: {request.request_id} failed with {e}"))
failed[0] += 1
print(traceback.format_exc())
if not is_match:
failed[0] += 1
print("FAIL =====>", image.feedback_result, predict_result, user_feedback, reviewed_result)
def handle(self, *args, **options):
test = options['test']
total = [0]
failed = [0]
#open csv file
with open(test, 'r') as csvfile:
reader = csv.reader(csvfile)
@ -100,8 +115,10 @@ class Command(BaseCommand):
if not request:
print("Not found ====>", row)
else:
# request, predict_result, user_feedback, reviewed_result
self.process_request(request, row[3], row[2], row[4], row[8])
# request, predict_result, user_feedback, reviewed_result, reason, counter
self.process_request(total, failed, request, row[PREDICT_INDEX], row[FEEDBACK_INDEX], row[REVIEWED_INDEX], row[REASON_INDEX], row[COUNTER_INDEX])
index += 1
self.stdout.write(self.style.SUCCESS(f"Failed/Total: {failed[0]}/{total[0]}"))
self.stdout.write(self.style.SUCCESS('Sample Django management command executed successfully!'))

View File

@ -2,17 +2,57 @@
from django.core.management.base import BaseCommand
from tqdm import tqdm
from fwd_api.models import SubscriptionRequestFile, SubscriptionRequest
from fwd_api.exception.exceptions import InvalidException
from fwd_api.utils.accuracy import predict_result_to_ready
import traceback
import copy
from django.utils import timezone
# Known retailer names observed in reviewed results; used only for membership
# checks when deciding whether a reviewed_result is trustworthy.
# NOTE(review): the original literal contained duplicates and collapsed them
# with list(set(...)), which made ordering non-deterministic between runs.
# Since the list is used purely for membership tests, we keep a de-duplicated
# literal in first-occurrence order instead (deterministic, same membership).
# NOTE(review): 'StarHub Shop' vs 'Starhub Shop' differ only in letter case,
# and 'Starho' / 'Ectri' / 'Whandising' look truncated or garbled — confirm
# against the retailer master data before relying on exact-string matches.
RETAILER_LIST = [
    '',
    'Amazon.sg',
    'Gain City (TV/AV)',
    'Harvey Norman (TV/AV)',
    'KRIS SHOP',
    'Lazada (Samsung Brand Store)',
    'M1 Shop',
    'Mohamed Mustafa & Samsuddin CO (TV/AV)',
    'Parisilk (TV/AV)',
    'Shopee (Samsung Brand Store)',
    'StarHub Shop',
    'Ectri',
    'Whandising',
    '3 Mobile',
    'Mister Mobile',
    'Lazada',
    'Samsung Experience Store',
    'A & Samsuddin Co.',
    'Parisilk',
    'Samsung Brand Store',
    'Shopee',
    'Onephone Online',
    'Challenger',
    'Eas Marketing',
    'Ing Mobile',
    'Starhub Shop',
    'Starho',
]
class Command(BaseCommand):
help = 'Refactor database for image level'
def add_arguments(self, parser):
# Add your command-line arguments here
parser.add_argument('test', type=str, help='Value for the argument')
parser.add_argument('start', type=str, help='start date, sample: 2023-01-02T00:00:00+0700')
parser.add_argument('end', type=str, help='end date, sample: 2023-01-03T00:00:00+0700')
def process_request(self, request):
if len(request.request_id.split(".")[0].split("_")) < 2:
@ -29,7 +69,7 @@ class Command(BaseCommand):
for i, image in enumerate(images):
# temp_imei_SAP_20240127223644_a493434edbf84fc08aeb87ef6cdde102_0.jpg
try:
image.index_in_request = int(image.file_name.split(".")[0].split("_")[-1]) if len(image.file_name.split(".")[0].split("_")) > 4 else 0
# image.index_in_request = int(image.file_name.split(".")[0].split("_")[-1]) if len(image.file_name.split(".")[0].split("_")) > 4 else 0
image.doc_type = image.file_name.split(".")[0].split("_")[1] if len(image.file_name.split(".")[0].split("_")) > 4 else "all"
image.processing_time = time_cost[image.doc_type][image.index_in_request]
if not request.predict_result:
@ -53,7 +93,13 @@ class Command(BaseCommand):
else:
_predict_result = {"retailername": None, "sold_to_party": None, "purchase_date": [], "imei_number": [_predict_result["imei_number"][image.index_in_request]]}
_feedback_result = {"retailername": None, "sold_to_party": None, "purchase_date": None, "imei_number": [_feedback_result["imei_number"][image.index_in_request]]} if _feedback_result else None
if isinstance(_reviewed_result, dict) and (len(_reviewed_result.get("imei_number", [])) or any(element in RETAILER_LIST for element in _reviewed_result.get("imei_number", []))) :
_reviewed_result = {"retailername": None, "sold_to_party": None, "purchase_date": None, "imei_number": [_reviewed_result["imei_number"][image.index_in_request]]} if _reviewed_result else None
else:
_reviewed_result = None
request.reviewed_result = None
request.is_reviewed = False
request.save()
image.predict_result = _predict_result
image.feedback_result = _feedback_result
image.reviewed_result = _reviewed_result
@ -64,8 +110,113 @@ class Command(BaseCommand):
continue
def handle(self, *args, **options):
test = options['test']
start = options['start']
end = options['end']
white_list = [
'SAP_20240202130136_592fc76aca3248d596e2bedcc4340fee',
'SAP_20240202164711_b07f7acb723946d595f6184b70a45387',
'SAP_20240203210818_8c6f6dd2ae42434ab40c690022a1ff6e',
'SAP_20240203141331_c64f71dc006d437dae558c6523de7e7c',
'SAP_20240201212124_55d135704e354cf5bb029a6ee1501f9f',
'SAP_20240202214418_c06c1669ce7743bb85d0f999bf022b8d',
'SAP_20240203033349_0e3db9ca289f4c9db96a6a07feba0597',
'SAP_20240203195254_d899295781474d24a36fee4d5cad0fd1',
'SAP_20240203192612_d470b4300528481a886a4629d5e538ab',
'SAP_20240203202654_a2399fb471b942dcb9f27a1bb09f026f',
'SAP_20240131093605_3aec91f880234096932059b515e31bc2',
'SAP_20240202164256_9b1d8c5531514b6dbe9857b33ef1dd93',
'SAP_20240202194336_78d55dfff1684368b32405eb3bb16800',
'SAP_20240202194901_8b86c75484f14411be7ddd63b0f71abe',
'SAP_20240204140236_91b02047d6124224b98c1bca7ef5808a',
'SAP_20240204140236_91b02047d6124224b98c1bca7ef5808a',
'SAP_20240205085813_05939ccf8d6a44068a7394d40360c957',
'SAP_20240205085813_05939ccf8d6a44068a7394d40360c957',
'SAP_20240205121225_27d61c27da0445a5963f6dcf8bebc169',
'SAP_20240205133855_3c355550a1d7449aab04176c70b0338d',
'SAP_20240205133855_3c355550a1d7449aab04176c70b0338d',
'SAP_20240205234354_08e4fbba404f440ba87ee08394d11a93',
'SAP_20240206001800_e2e326de5adc4a12b9c7f713b96c70b9',
'SAP_20240202193951_17a709bb3f4c4c3a9ef34b777fcd8b0d',
'SAP_20240204161121_6e80153cf05f45b5aa46d08fd52ed454',
'SAP_20240131181202_94dc906cd86e4a89b08386c5a737f843',
'SAP_20240203175643_6a19fbd9839643219fd21af2d46ff67b',
'SAP_20240131211332_7f937cfc26614230b43e7e658a7e9002',
'SAP_20240131215607_6c02781a83804fce8ee4c44e8aade1e6',
'SAP_20240131232749_5b8cfe62bb5847baa5c2e8e006d8fd4c',
'SAP_20240201143938_a4da2d697e6f4eab86a97344ab6ef612',
'SAP_20240201222839_9711372c80a84df6aefd2cf494f881a6',
'SAP_20240201223812_e55a3ffd4e0940b9a3d1ab4058b88006',
'SAP_20240202052454_35dedef1afc44cf3a2ee5e8f03a2e871',
'SAP_20240202123705_10ed0532243d4fad971ab4ea9982848a',
'SAP_20240202160851_e62bf43cecc24ff6988e41b59727b60a',
'SAP_20240202173309_2bcaa76aadfd4f8c87f72a64344fc818',
'SAP_20240202174053_2e5b97362b14492b91235e9a259bfa42',
'SAP_20240202222341_f132f1fde9974bdd87a734c04c5b91a8',
'SAP_20240203105436_38192f4a3c2745828b7bab702f73b5a8',
'SAP_20240203112502_ca87267f28be4538b5cdc8c49cd951fe',
'SAP_20240203141104_a5c879a0f7d94e36822d0f10f5f446be',
'SAP_20240203143937_3730bb0fa62f4ea792dbaf6b75c190fa',
'SAP_20240203144238_d5ef7897e23b4bbc954854746c315f09',
'SAP_20240203151137_4de4ed9e1cd94c60af3e6588ab418cab',
'SAP_20240203153540_9214a78c5a2142ccbf18dea79a7f7d32',
'SAP_20240203174245_d463b37dce2a48b48cff9b10333da146',
'SAP_20240203185611_4892b09d22f84418b4472bbbcab554f4',
'SAP_20240203192059_82bcb05bc9cd48ab94c724783a78995a',
'SAP_20240203200323_7e1944a9e6f3477793a7d8682efe12a8',
'SAP_20240203202429_780d51302e2a402683d0e8b9f7a5206c',
'SAP_20240203213530_e9e38035733348a1a3050b269a988ed4',
'SAP_20240203220513_6939d446931044feaeb0ea39a6409024',
'SAP_20240203224040_476f975502084050b829397c0d543626',
'SAP_20240203230738_f4343b5dbc8d4fac98ee08c8b631e738',
'SAP_20240204013646_d7d89fe2d22a4e5f8575ba3af0785e31',
'SAP_20240204081015_4c7f0d11a24a4bcebfb1adc50ef4f34a',
'SAP_20240204094056_66c2606c43a44dde9879467556aed70a',
'SAP_20240204110605_aa45d3f72c72465190b083a8a2ded18a',
'SAP_20240204123216_658c952721be43f9b4d107a38aa396e1',
'SAP_20240204123216_658c952721be43f9b4d107a38aa396e1',
'SAP_20240204140721_30e6267e4aed4597b43e10320fa5f75c',
'SAP_20240204163159_b34afc9534cd41ea93aebf000bfb0fb4',
'SAP_20240204163832_c7051196f83e4f68bc09668ea273370b',
'SAP_20240204200851_4597724b85f74907b396658642c950e1',
'SAP_20240204201303_156a0bf95ac2434083f9ff519d215b8a',
'SAP_20240205122953_b2f78a10898048a09d21779372d910be',
'SAP_20240205185633_14afd3692c5c47f4a228689c89268595',
'SAP_20240205200646_f9b7bf31903649d1be41996174047b57',
'SAP_20240205204209_9b8ee17983404658a0121717945c7ea5',
'SAP_20240205235953_b92aaccd88214282880ec70034fbe0fc',
'SAP_20240203180224_7bbf6e08a37f4a968a27852b0f9348a4',
'SAP_20240204095520_f306f5b596db4c788f49d24da909ba3b',
'SAP_20240204131511_ba42a26ce05e4d279f3c4d956723ce0c',
'SAP_20240204153410_469368284b884c29b1213ea976956891',
'SAP_20240129173923_7809ec94fa014fd88406f954a5b85c65',
'SAP_20240131173059_3afe5da631fa44ec969baf8974092641',
'SAP_20240202213429_737af53cb1194c31a50094361e3090f2',
'SAP_20240203001712_5568f19671b941aab2fea2d93f6b688f',
'SAP_20240203155754_882632f2d2b640bba9a6ea246cbace0e',
'SAP_20240204090000_7272e17a7c4d468c8bb074c69a67edc6',
'SAP_20240205140231_5a5be97738a648dda0520b24a1957336',
'SAP_20240205172847_c36b8542b66148f6b473259438ca5280',
'SAP_20240205173225_e5fe5e9db09b4eee92a0b044eecea233',
'SAP_20240205183822_95dd07be90874db0ac1827dfeb6317e2',
]
if start or end:
try:
start_date = timezone.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S%z') # We care about day precision only
end_date = timezone.datetime.strptime(end, '%Y-%m-%dT%H:%M:%S%z')
# start_date = timezone.make_aware(start_date)
# end_date = timezone.make_aware(end_date)
except Exception as e:
print(f"[INFO]: start: {start}")
print(f"[INFO]: end: {end}")
raise InvalidException(excArgs="Date format")
subcription_iter = SubscriptionRequest.objects.filter(created_at__range=(start_date, end_date))
else:
subcription_iter = SubscriptionRequest.objects.all()
for request in tqdm(subcription_iter.iterator()):
if request.request_id not in white_list:
continue
self.process_request(request)
self.stdout.write(self.style.SUCCESS('Sample Django management command executed successfully!'))

View File

@ -0,0 +1,33 @@
# Generated by Django 4.1.3 on 2024-02-28 09:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add review-tracking counters to Report and a review_status flag to ReportFile."""

    dependencies = [
        ('fwd_api', '0182_report_combined_accuracy'),
    ]

    operations = [
        # Per-report counters of requests by review state; names mirror the
        # Report model fields (num_reviewed / num_not_reviewed / num_no_reviewed).
        migrations.AddField(
            model_name='report',
            name='num_no_reviewed',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='report',
            name='num_not_reviewed',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='report',
            name='num_reviewed',
            field=models.IntegerField(default=0),
        ),
        # Per-file review state: -1 = no need to review, 0 = not reviewed,
        # 1 = reviewed (matches the comment on ReportFile.review_status).
        migrations.AddField(
            model_name='reportfile',
            name='review_status',
            field=models.IntegerField(default=-1),
        ),
    ]

View File

@ -28,6 +28,9 @@ class Report(models.Model):
number_bad_images = models.IntegerField(default=0)
number_imei = models.IntegerField(default=0)
number_invoice = models.IntegerField(default=0)
num_reviewed = models.IntegerField(default=0)
num_not_reviewed = models.IntegerField(default=0)
num_no_reviewed = models.IntegerField(default=0)
number_imei_transaction = models.IntegerField(default=0)
number_invoice_transaction = models.IntegerField(default=0)

View File

@ -32,6 +32,7 @@ class ReportFile(models.Model):
bad_image_reason = models.TextField(default="")
counter_measures = models.TextField(default="")
error = models.TextField(default="")
review_status = models.IntegerField(default=-1) # -1: No need review, 0: not reviewed, 1: reviewed

View File

@ -17,8 +17,6 @@ from fwd import settings
from ..models import SubscriptionRequest, Report, ReportFile
import json
BAD_THRESHOLD = 0.75
valid_keys = ["retailername", "sold_to_party", "purchase_date", "imei_number"]
class ReportAccumulateByRequest:
@ -67,7 +65,8 @@ class ReportAccumulateByRequest:
'retailername': IterAvg(),
'sold_to_party': IterAvg()
},
'num_request': 0
'num_request': 0,
"review_process": []
}
self.day_format = {
'subs': sub,
@ -110,7 +109,8 @@ class ReportAccumulateByRequest:
'sold_to_party': IterAvg()
},
"report_files": [],
'num_request': 0
"num_request": 0,
"review_process": []
},
@staticmethod
@ -155,7 +155,7 @@ class ReportAccumulateByRequest:
total["usage"]["imei"] += 1 if doc_type == "imei" else 0
total["usage"]["invoice"] += 1 if doc_type == "invoice" else 0
total["usage"]["total_images"] += 1
total["review_process"].append(report_file.review_status)
return total
@staticmethod
@ -192,7 +192,7 @@ class ReportAccumulateByRequest:
print(f"[WARM]: Weird doctype: {report_file.doc_type}")
day_data["average_processing_time"][report_file.doc_type] = IterAvg()
day_data["average_processing_time"][report_file.doc_type].add_avg(report_file.time_cost, 1) if report_file.time_cost else 0
day_data["review_process"].append(report_file.review_status)
return day_data
def add(self, request, report_files):
@ -319,6 +319,7 @@ class ReportAccumulateByRequest:
_data[month][1][day]["reviewed_accuracy"]["purchase_date"] = _data[month][1][day]["reviewed_accuracy"]["purchase_date"]()
_data[month][1][day]["reviewed_accuracy"]["retailername"] = _data[month][1][day]["reviewed_accuracy"]["retailername"]()
_data[month][1][day]["reviewed_accuracy"]["sold_to_party"] = _data[month][1][day]["reviewed_accuracy"]["sold_to_party"]()
_data[month][1][day]["review_process"] = _data[month][1][day]["review_process"].count(1)/(_data[month][1][day]["review_process"].count(0)+ _data[month][1][day]["review_process"].count(1)) if (_data[month][1][day]["review_process"].count(0)+ _data[month][1][day]["review_process"].count(1)) >0 else 0
_data[month][1][day].pop("report_files")
_data[month][1][day]["images_quality"]["successful_percent"] = _data[month][1][day]["images_quality"]["successful"]/_data[month][1][day]["total_images"] if _data[month][1][day]["total_images"] > 0 else 0
@ -342,6 +343,7 @@ class ReportAccumulateByRequest:
_data[month][0]["reviewed_accuracy"]["purchase_date"] = _data[month][0]["reviewed_accuracy"]["purchase_date"]()
_data[month][0]["reviewed_accuracy"]["retailername"] = _data[month][0]["reviewed_accuracy"]["retailername"]()
_data[month][0]["reviewed_accuracy"]["sold_to_party"] = _data[month][0]["reviewed_accuracy"]["sold_to_party"]()
_data[month][0]["review_process"] = _data[month][0]["review_process"].count(1)/(_data[month][0]["review_process"].count(0)+ _data[month][0]["review_process"].count(1)) if (_data[month][0]["review_process"].count(0)+ _data[month][0]["review_process"].count(1)) >0 else 0
return _data
@ -716,6 +718,7 @@ def calculate_avg_accuracy(acc, type, keys=[]):
return sum(acc_list)/len(acc_list) if len(acc_list) > 0 else None
# Deprecated
def calculate_and_save_subcription_file(report, request):
request_att = {"acc": {"feedback": {"imei_number": [],
"purchase_date": [],
@ -816,6 +819,7 @@ def calculate_a_request(report, request):
"total_images": 0,
"bad_images": 0,
"bad_image_list": [],
"is_reviewed": [], # -1: No need to review, 0: Not reviewed, 1: Reviewed
}
images = SubscriptionRequestFile.objects.filter(request=request, file_category=FileCategory.Origin.value)
report_files = []
@ -851,6 +855,9 @@ def calculate_a_request(report, request):
if len(att["normalized_data"]["reviewed"].get("purchase_date", [])) > 0:
image.predict_result["purchase_date"] = [att["normalized_data"]["reviewed"]["purchase_date"][i][0] for i in range(len(att["normalized_data"]["reviewed"]["purchase_date"]))]
image.reviewed_result["purchase_date"] = att["normalized_data"]["reviewed"]["purchase_date"][rv_max_indexes["purchase_date"]][1]
if request.is_reviewed:
att["is_reviewed"] = 1
request_att["is_reviewed"].append(att["is_reviewed"])
new_report_file = ReportFile(report=report,
subsidiary=_sub,
correspond_request_id=request.request_id,
@ -863,11 +870,12 @@ def calculate_a_request(report, request):
reviewed_accuracy=att["acc"]["reviewed"],
acc=att["avg_acc"],
is_bad_image=att["is_bad_image"],
is_reviewed="Yes" if request.is_reviewed else "No",
is_reviewed= "Yes" if request.is_reviewed else "No",
time_cost=image.processing_time,
bad_image_reason=image.reason,
counter_measures=image.counter_measures,
error="|".join(att["err"])
error="|".join(att["err"]),
review_status=att["is_reviewed"],
)
report_files.append(new_report_file)
@ -920,7 +928,9 @@ def calculate_subcription_file(subcription_request_file):
"reviewed": {}},
"err": [],
"is_bad_image": False,
"avg_acc": None}
"avg_acc": None,
"is_reviewed": -1, # -1: No need to review, 0: Not reviewed, 1: Reviewed
}
if not subcription_request_file.predict_result:
return 400, att
@ -942,24 +952,21 @@ def calculate_subcription_file(subcription_request_file):
# print(f"[DEBUG]: e: {e} -key_name: {key_name}")
subcription_request_file.feedback_accuracy = att["acc"]["feedback"]
subcription_request_file.reviewed_accuracy = att["acc"]["reviewed"]
subcription_request_file.save()
avg_reviewed = calculate_avg_accuracy(att["acc"], "reviewed", ["retailername", "sold_to_party", "purchase_date", "imei_number"])
avg_feedback = calculate_avg_accuracy(att["acc"], "feedback", ["retailername", "sold_to_party", "purchase_date", "imei_number"])
if avg_feedback is not None or avg_reviewed is not None:
avg_acc = 0
if avg_feedback is not None:
avg_acc = avg_feedback
if avg_feedback < settings.NEED_REVIEW:
att["is_reviewed"] = 0
if avg_reviewed is not None:
avg_acc = avg_reviewed
att["is_reviewed"] = 1
att["avg_acc"] = avg_acc
if avg_acc < BAD_THRESHOLD:
if avg_acc < settings.BAD_THRESHOLD:
att["is_bad_image"] = True
# exclude bad images
# for key_name in valid_keys:
# att["acc"]["feedback"][key_name] = []
# att["acc"]["reviewed"][key_name] = []
# att["avg_acc"] = None
return 200, att
def calculate_attributions(request): # for one request, return in order
@ -1005,11 +1012,11 @@ def calculate_attributions(request): # for one request, return in order
avg_invoice_feedback = calculate_avg_accuracy(acc, "feedback", ["retailername", "sold_to_party", "purchase_date"])
avg_invoice_reviewed = calculate_avg_accuracy(acc, "reviewed", ["retailername", "sold_to_party", "purchase_date"])
if avg_invoice_feedback is not None or avg_invoice_reviewed is not None:
if max([x for x in [avg_invoice_feedback, avg_invoice_reviewed] if x is not None]) < BAD_THRESHOLD:
if max([x for x in [avg_invoice_feedback, avg_invoice_reviewed] if x is not None]) < settings.BAD_THRESHOLD:
image_quality_num[1] += 1
for i, _ in enumerate(acc["feedback"]["imei_number"]):
if acc["feedback"]["imei_number"][i] is not None and acc["reviewed"]["imei_number"][i] is not None:
if max([x for x in [acc["feedback"]["imei_number"][i], acc["reviewed"]["imei_number"][i]] if x is not None]) < BAD_THRESHOLD:
if max([x for x in [acc["feedback"]["imei_number"][i], acc["reviewed"]["imei_number"][i]] if x is not None]) < settings.BAD_THRESHOLD:
image_quality_num[1] += 1
# time cost and quality calculation
# TODO: to be deprecated, doc_type would be in file level in the future

View File

@ -489,7 +489,7 @@ def dict2xlsx(input: json, _type='report'):
'O': 'average_accuracy_rate.retailer_name',
'P': 'average_processing_time.imei',
'Q': 'average_processing_time.invoice',
'R': 'preview_process'
'R': 'review_process'
}
start_index = 5
@ -527,13 +527,13 @@ def dict2xlsx(input: json, _type='report'):
ws[key + str(start_index)] = value
ws[key + str(start_index)].border = border
ws[key + str(start_index)].font = font_black
if 'accuracy' in mapping[key] or 'time' in mapping[key] or 'percent' in mapping[key] or 'speed' in mapping[key]:
if 'accuracy' in mapping[key] or 'time' in mapping[key] or 'percent' in mapping[key] or 'speed' in mapping[key] or mapping[key] in ["review_process"]:
ws[key + str(start_index)].number_format = '0.0'
if _type == 'report':
if subtotal['subs'] == '+':
ws[key + str(start_index)].font = font_black_bold
if key in ['A', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q']:
if key in ['A', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R']:
ws[key + str(start_index)].fill = fill_gray
elif key == 'B':
ws[key + str(start_index)].fill = fill_green

View File

@ -29,7 +29,8 @@ update_data = {
"report_overview_duration"
def update_report(login_token, report_overview_duration=["30d", "7d"], subsidiary=["SEAO", "SEAU", "SESP", "SME", "SEPCO", "TSE", "SEIN"]):
# def update_report(login_token, report_overview_duration=["30d", "7d"], subsidiary=["all", "SEAU", "SESP", "SME", "SEPCO", "TSE", "SEIN"]):
def update_report(login_token, report_overview_duration=["7d", "30d"], subsidiary=["SEAO", "SEAU", "SESP", "SME", "SEPCO", "TSE", "SEIN"]):
headers = {'Authorization': login_token}
for dur in report_overview_duration:
for sub in subsidiary:

View File

@ -21,6 +21,7 @@ interface DataType {
invoiceAPT: number;
snImeiTC: number; // TC: transaction count
invoiceTC: number;
reviewProcess: number;
}
const columns: TableColumnsType<DataType> = [
@ -223,7 +224,7 @@ const columns: TableColumnsType<DataType> = [
const isAbnormal = ensureMax(record.snImeiAPT, 2);
return (
<span style={{ color: isAbnormal ? 'red' : '' }}>
{record?.snImeiAPT?.toFixed(2)}
{record?.snImeiAPT?.toFixed(1)}
</span>
);
},
@ -236,13 +237,26 @@ const columns: TableColumnsType<DataType> = [
const isAbnormal = ensureMax(record.invoiceAPT, 2);
return (
<span style={{ color: isAbnormal ? 'red' : '' }}>
{record?.invoiceAPT?.toFixed(2)}
{record?.invoiceAPT?.toFixed(1)}
</span>
);
},
},
],
},
{
title: 'Review Process',
dataIndex: 'review_process',
key: 'review_process',
width: '100px',
render: (_, record) => {
return (
<span>
{formatPercent(record.reviewProcess)==='-'? 0:formatPercent(record.reviewProcess)}
</span>
);
},
},
];
interface ReportOverViewTableProps {
@ -275,6 +289,7 @@ const ReportOverViewTable: React.FC<ReportOverViewTableProps> = ({
invoiceAPT: item.average_processing_time.invoice,
snImeiTC: item.usage.imei,
invoiceTC: item.usage.invoice,
reviewProcess:item.review_process,
};
},
);

View File

@ -174,8 +174,8 @@ services:
- ./cope2n-api:/app
working_dir: /app
# command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 5"
command: bash -c "tail -f > /dev/null"
command: sh -c "celery -A fwd_api.celery_worker.worker worker -l INFO -c 5"
# command: bash -c "tail -f > /dev/null"
# Back-end persistent
db-sbt:

View File

@ -10,9 +10,9 @@ from dotenv import load_dotenv
load_dotenv("../.env_prod")
OUTPUT_NAME = "Jan"
START_DATE = datetime(2024, 1, 1, tzinfo=timezone('Asia/Ho_Chi_Minh'))
END_DATE = datetime(2024, 2, 1, tzinfo=timezone('Asia/Ho_Chi_Minh'))
OUTPUT_NAME = "all_0219_0226"
START_DATE = datetime(2024, 2, 19, tzinfo=timezone('Asia/Ho_Chi_Minh'))
END_DATE = datetime(2024, 2, 27, tzinfo=timezone('Asia/Ho_Chi_Minh'))
# Database connection details
db_host = os.environ.get('DB_HOST', "")
@ -63,31 +63,31 @@ cursor.close()
conn.close()
# # Download folders from S3
# s3_client = boto3.client(
# 's3',
# aws_access_key_id=access_key,
# aws_secret_access_key=secret_key
# )
s3_client = boto3.client(
's3',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key
)
# request_ids = []
# for rq in data:
# rq_id = rq[3]
# request_ids.append(rq_id)
request_ids = []
for rq in data:
rq_id = rq[3]
request_ids.append(rq_id)
# for request_id in tqdm(request_ids):
# folder_key = f"{s3_folder_prefix}/{request_id}/" # Assuming folder structure like: s3_bucket_name/s3_folder_prefix/request_id/
# local_folder_path = f"{OUTPUT_NAME}/{request_id}/" # Path to the local folder to save the downloaded files
# os.makedirs(OUTPUT_NAME, exist_ok=True)
# os.makedirs(local_folder_path, exist_ok=True)
for request_id in tqdm(request_ids):
folder_key = f"{s3_folder_prefix}/{request_id}/" # Assuming folder structure like: s3_bucket_name/s3_folder_prefix/request_id/
local_folder_path = f"{OUTPUT_NAME}/{request_id}/" # Path to the local folder to save the downloaded files
os.makedirs(OUTPUT_NAME, exist_ok=True)
os.makedirs(local_folder_path, exist_ok=True)
# # List objects in the S3 folder
# response = s3_client.list_objects_v2(Bucket=s3_bucket_name, Prefix=folder_key)
# objects = response.get('Contents', [])
# List objects in the S3 folder
response = s3_client.list_objects_v2(Bucket=s3_bucket_name, Prefix=folder_key)
objects = response.get('Contents', [])
# for s3_object in objects:
# object_key = s3_object['Key']
# local_file_path = local_folder_path + object_key.split('/')[-1] # Extracting the file name from the object key
for s3_object in objects:
object_key = s3_object['Key']
local_file_path = local_folder_path + object_key.split('/')[-1] # Extracting the file name from the object key
# # Download the S3 object to the local file
# s3_client.download_file(s3_bucket_name, object_key, local_file_path)
# Download the S3 object to the local file
s3_client.download_file(s3_bucket_name, object_key, local_file_path)