From 0ab18b06b401ef04480067f711a86a7b58c1c085 Mon Sep 17 00:00:00 2001
From: PhanThanhTrung
Date: Wed, 6 Mar 2024 14:56:02 +0700
Subject: [PATCH] update billing report

---
 cope2n-api/billing_report.xlsx                | Bin 0 -> 6555 bytes
 cope2n-api/fwd_api/api/accuracy_view.py       |  10 +-
 .../celery_worker/process_report_tasks.py     | 124 ++++++++++++------
 .../migrations/0185_report_report_type.py     |  18 +++
 cope2n-api/fwd_api/models/Report.py           |   5 +-
 .../request/ReportCreationSerializer.py       |   5 +
 cope2n-api/fwd_api/utils/accuracy.py          |  47 +++++++
 cope2n-api/fwd_api/utils/file.py              |  73 +++++++----
 8 files changed, 209 insertions(+), 73 deletions(-)
 create mode 100644 cope2n-api/billing_report.xlsx
 mode change 100644 => 100755 cope2n-api/fwd_api/api/accuracy_view.py
 mode change 100644 => 100755 cope2n-api/fwd_api/celery_worker/process_report_tasks.py
 create mode 100755 cope2n-api/fwd_api/migrations/0185_report_report_type.py
 mode change 100644 => 100755 cope2n-api/fwd_api/models/Report.py
 mode change 100644 => 100755 cope2n-api/fwd_api/request/ReportCreationSerializer.py
 mode change 100644 => 100755 cope2n-api/fwd_api/utils/file.py

diff --git a/cope2n-api/billing_report.xlsx b/cope2n-api/billing_report.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..a765347a97054b17e384a60856ca5e1c548c0f43
GIT binary patch
[binary literal omitted: 6555-byte billing_report.xlsx template consumed by dict2xlsx(_type='billing_report')]
diff --git a/cope2n-api/fwd_api/api/accuracy_view.py b/cope2n-api/fwd_api/api/accuracy_view.py
old mode 100644
new mode 100755
index 9d14f51..1f83e49
--- a/cope2n-api/fwd_api/api/accuracy_view.py
+++ b/cope2n-api/fwd_api/api/accuracy_view.py
@@ -222,7 +222,11 @@ class AccuracyViewSet(viewsets.ViewSet):
         subsidiary = request.data.get("subsidiary", "all")
         is_daily_report = request.data.get('is_daily_report', False)
         report_overview_duration = request.data.get("report_overview_duration", "")
+        report_type = request.data.get("report_type", "accuracy")
         subsidiary = map_subsidiary_long_to_short(subsidiary)
+
+        if report_type == "billing" and subsidiary.lower().replace(" ", "") not in settings.SUB_FOR_BILLING:
+            raise InvalidException(excArgs="Subsidiary for billing report")
 
         if is_daily_report:
             if report_overview_duration not in settings.OVERVIEW_REPORT_DURATION:
@@ -258,11 +262,9 @@ class AccuracyViewSet(viewsets.ViewSet):
             "include_test": include_test,
             "subsidiary": subsidiary,
             "is_daily_report": is_daily_report,
-            "report_overview_duration": report_overview_duration
+            "report_overview_duration": report_overview_duration,
+            "report_type": report_type,
         }
-        # if is_daily_report:
-        #     if (end_date-start_date) > timezone.timedelta(days=1):
-        #         raise InvalidException(excArgs="Date range")
 
         report_id = "report" + "_" + timezone.datetime.now().strftime("%Y%m%d%H%M%S%z") + "_" + uuid.uuid4().hex
         new_report: Report = Report(
diff --git a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
old mode 100644
new mode 100755
index aeab9b1..80763f1
--- a/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
+++ b/cope2n-api/fwd_api/celery_worker/process_report_tasks.py
@@ -3,7 +3,9 @@ import traceback
 from fwd_api.models import SubscriptionRequest, Report, ReportFile
 from fwd_api.celery_worker.worker import app
 from ..utils import s3 as S3Util
-from ..utils.accuracy import update_temp_accuracy, IterAvg, calculate_and_save_subcription_file, count_transactions, extract_report_detail_list, calculate_a_request, ReportAccumulateByRequest
+from ..utils.accuracy import (update_temp_accuracy, IterAvg, calculate_and_save_subcription_file,
+                              count_transactions, extract_report_detail_list, calculate_a_request,
+                              ReportAccumulateByRequest, create_billing_data)
 from ..utils.file import dict2xlsx, save_workbook_file, save_report_to_S3, save_images_to_csv_briefly
 from ..utils import time_stuff
 from ..utils.redis import RedisUtils
@@ -162,31 +164,40 @@ def make_a_report(report_id, query_set):
 
 @app.task(name='make_a_report_2')
 def make_a_report_2(report_id, query_set):
+    report_type = query_set.pop("report_type", "accuracy")
+    if report_type == "accuracy":
+        create_accuracy_report(report_id=report_id, **query_set)
+    elif report_type == "billing":
+        create_billing_report(report_id=report_id, **query_set)
+    else:
+        raise TypeError("Invalid report type")
+
+
+def create_accuracy_report(report_id, **kwargs):
     try:
-        start_date = timezone.datetime.strptime(query_set["start_date_str"], '%Y-%m-%dT%H:%M:%S%z')
-        end_date = timezone.datetime.strptime(query_set["end_date_str"], '%Y-%m-%dT%H:%M:%S%z')
+        start_date = timezone.datetime.strptime(kwargs["start_date_str"], '%Y-%m-%dT%H:%M:%S%z')
+        end_date = timezone.datetime.strptime(kwargs["end_date_str"], '%Y-%m-%dT%H:%M:%S%z')
         base_query = Q(created_at__range=(start_date, end_date))
-        if query_set["request_id"]:
-            base_query &= Q(request_id=query_set["request_id"])
-        if query_set["redemption_id"]:
-            base_query &= Q(redemption_id=query_set["redemption_id"])
+        if kwargs["request_id"]:
+            base_query &= Q(request_id=kwargs["request_id"])
+        if kwargs["redemption_id"]:
+            base_query &= Q(redemption_id=kwargs["redemption_id"])
         base_query &= Q(is_test_request=False)
-        if isinstance(query_set["include_test"], str):
-            query_set["include_test"] = True if query_set["include_test"].lower() in ["true", "yes", "1"] else False
-            if query_set["include_test"]:
+        if isinstance(kwargs["include_test"], str):
+            kwargs["include_test"] = True if kwargs["include_test"].lower() in ["true", "yes", "1"] else False
+            if kwargs["include_test"]:
                 # base_query = ~base_query
                 base_query.children = base_query.children[:-1]
-
-        elif isinstance(query_set["include_test"], bool):
-            if query_set["include_test"]:
+        elif isinstance(kwargs["include_test"], bool):
+            if kwargs["include_test"]:
                 base_query = ~base_query
-        if isinstance(query_set["subsidiary"], str):
-            if query_set["subsidiary"] and query_set["subsidiary"].lower().replace(" ", "") not in settings.SUB_FOR_BILLING:
-                base_query &= Q(redemption_id__startswith=query_set["subsidiary"])
-        if isinstance(query_set["is_reviewed"], str):
-            if query_set["is_reviewed"] == "reviewed":
+        if isinstance(kwargs["subsidiary"], str):
+            if kwargs["subsidiary"] and kwargs["subsidiary"].lower().replace(" ", "") not in settings.SUB_FOR_BILLING:
+                base_query &= Q(redemption_id__startswith=kwargs["subsidiary"])
+        if isinstance(kwargs["is_reviewed"], str):
+            if kwargs["is_reviewed"] == "reviewed":
                 base_query &= Q(is_reviewed=True)
-            elif query_set["is_reviewed"] == "not reviewed":
+            elif kwargs["is_reviewed"] == "not reviewed":
                 base_query &= Q(is_reviewed=False)
 
         errors = []
@@ -213,8 +224,7 @@ def make_a_report_2(report_id, query_set):
         # TODO: Multithreading
         # Calculate accuracy, processing time, ....Then save.
         subscription_requests = SubscriptionRequest.objects.filter(base_query).order_by('created_at')
-        report: Report = \
-            Report.objects.filter(report_id=report_id).first()
+        report: Report = Report.objects.filter(report_id=report_id).first()
         # TODO: number of transaction by doc type
         num_request = 0
         report_files = []
@@ -226,14 +236,14 @@ def make_a_report_2(report_id, query_set):
             request_att, _report_files = calculate_a_request(report, request)
             report_files += _report_files
             report_engine.add(request, _report_files)
-            request.feedback_accuracy = {"imei_number" : mean_list(request_att["acc"]["feedback"].get("imei_number", [None])),
-                                         "purchase_date" : mean_list(request_att["acc"]["feedback"].get("purchase_date", [None])),
-                                         "retailername" : mean_list(request_att["acc"]["feedback"].get("retailername", [None])),
-                                         "sold_to_party" : mean_list(request_att["acc"]["feedback"].get("sold_to_party", [None]))}
-            request.reviewed_accuracy = {"imei_number" : mean_list(request_att["acc"]["reviewed"].get("imei_number", [None])),
-                                         "purchase_date" : mean_list(request_att["acc"]["reviewed"].get("purchase_date", [None])),
-                                         "retailername" : mean_list(request_att["acc"]["reviewed"].get("retailername", [None])),
-                                         "sold_to_party" : mean_list(request_att["acc"]["reviewed"].get("sold_to_party", [None]))}
+            request.feedback_accuracy = {"imei_number": mean_list(request_att["acc"]["feedback"].get("imei_number", [None])),
+                                         "purchase_date": mean_list(request_att["acc"]["feedback"].get("purchase_date", [None])),
+                                         "retailername": mean_list(request_att["acc"]["feedback"].get("retailername", [None])),
+                                         "sold_to_party": mean_list(request_att["acc"]["feedback"].get("sold_to_party", [None]))}
+            request.reviewed_accuracy = {"imei_number": mean_list(request_att["acc"]["reviewed"].get("imei_number", [None])),
+                                         "purchase_date": mean_list(request_att["acc"]["reviewed"].get("purchase_date", [None])),
+                                         "retailername": mean_list(request_att["acc"]["reviewed"].get("retailername", [None])),
+                                         "sold_to_party": mean_list(request_att["acc"]["reviewed"].get("sold_to_party", [None]))}
             request.save()
             number_images += request_att["total_images"]
             number_bad_images += request_att["bad_images"]
@@ -249,7 +259,7 @@ def make_a_report_2(report_id, query_set):
             num_request += 1
             review_progress += request_att.get("is_reviewed", [])
 
-        report_fine_data, _save_data = report_engine.save(report.report_id, query_set.get("is_daily_report", False), query_set["include_test"])
+        report_fine_data, _save_data = report_engine.save(report.report_id, kwargs.get("is_daily_report", False), kwargs["include_test"])
         transaction_att = count_transactions(start_date, end_date, report.subsidiary)
         # Do saving process
         report.number_request = num_request
@@ -260,27 +270,27 @@ def make_a_report_2(report_id, query_set):
         # FIXME: refactor this data stream for endurability
         report.average_OCR_time = {"invoice": time_cost["invoice"](), "imei": time_cost["imei"](),
                                    "invoice_count": time_cost["invoice"].count, "imei_count": time_cost["imei"].count}
-
-        report.average_OCR_time["avg"] = (report.average_OCR_time["invoice"]*report.average_OCR_time["invoice_count"] + report.average_OCR_time["imei"]*report.average_OCR_time["imei_count"])/(report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) if (report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) > 0 else None
-
+
+        report.average_OCR_time["avg"] = (report.average_OCR_time["invoice"]*report.average_OCR_time["invoice_count"] + report.average_OCR_time["imei"]*report.average_OCR_time["imei_count"])/(
report.average_OCR_time["imei"]*report.average_OCR_time["imei_count"])/( + report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) if (report.average_OCR_time["imei_count"] + report.average_OCR_time["invoice_count"]) > 0 else None report.number_imei_transaction = transaction_att.get("imei", 0) report.number_invoice_transaction = transaction_att.get("invoice", 0) acumulated_acc = {"feedback": {}, - "reviewed": {}, - "acumulated": {}} + "reviewed": {}, + "acumulated": {}} for acc_type in ["feedback", "reviewed", "acumulated"]: avg_acc = IterAvg() for key in ["imei_number", "purchase_date", "retailername", "sold_to_party"]: acumulated_acc[acc_type][key] = accuracy[acc_type][key]() - acumulated_acc[acc_type][key+"_count"] = accuracy[acc_type][key].count + acumulated_acc[acc_type][key + "_count"] = accuracy[acc_type][key].count avg_acc.add_avg(acumulated_acc[acc_type][key], acumulated_acc[acc_type][key+"_count"]) acumulated_acc[acc_type]["avg"] = avg_acc() report.feedback_accuracy = acumulated_acc["feedback"] report.reviewed_accuracy = acumulated_acc["reviewed"] report.combined_accuracy = acumulated_acc["acumulated"] - + report.num_reviewed = review_progress.count(1) report.num_not_reviewed = review_progress.count(0) report.num_no_reviewed = review_progress.count(-1) @@ -294,8 +304,8 @@ def make_a_report_2(report_id, query_set): data = extract_report_detail_list(report_files, lower=True) data_workbook = dict2xlsx(data, _type='report_detail') local_workbook = save_workbook_file(report.report_id + ".xlsx", report, data_workbook) - s3_key=save_report_to_S3(report.report_id, local_workbook, 5) - if query_set["is_daily_report"]: + s3_key = save_report_to_S3(report.report_id, local_workbook, 5) + if kwargs["is_daily_report"]: # Save overview dashboard # multiple accuracy by 100 save_data = copy.deepcopy(_save_data) @@ -313,10 +323,9 @@ def make_a_report_2(report_id, query_set): for x_key in report_fine_data[i][key].keys(): report_fine_data[i][key][x_key] = report_fine_data[i][key][x_key]*100 data_workbook = dict2xlsx(report_fine_data, _type='report') - overview_filename = query_set["subsidiary"] + "_" + query_set["report_overview_duration"] + ".xlsx" + overview_filename = kwargs["subsidiary"] + "_" + kwargs["report_overview_duration"] + ".xlsx" local_workbook = save_workbook_file(overview_filename, report, data_workbook, settings.OVERVIEW_REPORT_ROOT) - s3_key=save_report_to_S3(report.report_id, local_workbook) - # redis_client.set_cache(settings.OVERVIEW_REPORT_ROOT, overview_filename.replace(".xlsx", ""), json.dumps(save_data)) + s3_key = save_report_to_S3(report.report_id, local_workbook) set_cache(overview_filename.replace(".xlsx", ""), save_data) except IndexError as e: @@ -327,3 +336,34 @@ def make_a_report_2(report_id, query_set): print("[ERROR]: an error occured while processing report: ", report_id) traceback.print_exc() return 400 + + +def create_billing_report(report_id, **kwargs): + try: + start_date = timezone.datetime.strptime( + kwargs["start_date_str"], '%Y-%m-%dT%H:%M:%S%z') + end_date = timezone.datetime.strptime( + kwargs["end_date_str"], '%Y-%m-%dT%H:%M:%S%z') + base_query = Q(created_at__range=(start_date, end_date)) + base_query &= Q(is_test_request=False) + + subscription_requests = SubscriptionRequest.objects.filter( + base_query).order_by('created_at') + report: Report = Report.objects.filter(report_id=report_id).first() + billing_data = create_billing_data(subscription_requests) + report.number_request = len(subscription_requests) + 
+        report.number_images = len(billing_data)
+        report.status = "Ready"
+        report.save()
+        data_workbook = dict2xlsx(billing_data, _type='billing_report')
+        local_workbook = save_workbook_file(
+            report.report_id + ".xlsx", report, data_workbook)
+        s3_key = save_report_to_S3(report.report_id, local_workbook)
+    except IndexError as e:
+        print(e)
+        traceback.print_exc()
+        print("NotFound request by report id, %d", report_id)
+    except Exception as e:
+        print("[ERROR]: an error occured while processing report: ", report_id)
+        traceback.print_exc()
+        return 400
diff --git a/cope2n-api/fwd_api/migrations/0185_report_report_type.py b/cope2n-api/fwd_api/migrations/0185_report_report_type.py
new file mode 100755
index 0000000..c28a1bf
--- /dev/null
+++ b/cope2n-api/fwd_api/migrations/0185_report_report_type.py
@@ -0,0 +1,18 @@
+# Generated by Django 4.1.3 on 2024-03-06 06:57
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('fwd_api', '0184_caching'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='report',
+            name='report_type',
+            field=models.CharField(choices=[('BILLING', 'billing'), ('ACCURACY', 'accuracy')], default='accuracy', max_length=10),
+        ),
+    ]
diff --git a/cope2n-api/fwd_api/models/Report.py b/cope2n-api/fwd_api/models/Report.py
old mode 100644
new mode 100755
index 22df35a..ebfb393
--- a/cope2n-api/fwd_api/models/Report.py
+++ b/cope2n-api/fwd_api/models/Report.py
@@ -2,6 +2,7 @@ from django.db import models
 from django.utils import timezone
 from fwd_api.models.Subscription import Subscription
 
+
 class Report(models.Model):
     # Metadata
     id = models.AutoField(primary_key=True)
@@ -42,4 +43,6 @@ class Report(models.Model):
 
     feedback_accuracy = models.JSONField(null=True)
     reviewed_accuracy = models.JSONField(null=True)
-    combined_accuracy = models.JSONField(null=True)
\ No newline at end of file
+    combined_accuracy = models.JSONField(null=True)
+    report_type = models.CharField(max_length=10, choices=[
+        ("BILLING", "billing"), ("ACCURACY", "accuracy")], default="accuracy")
diff --git a/cope2n-api/fwd_api/request/ReportCreationSerializer.py b/cope2n-api/fwd_api/request/ReportCreationSerializer.py
old mode 100644
new mode 100755
index fac7c8a..a2d0110
--- a/cope2n-api/fwd_api/request/ReportCreationSerializer.py
+++ b/cope2n-api/fwd_api/request/ReportCreationSerializer.py
@@ -36,4 +36,9 @@
     report_overview_duration = serializers.CharField(
         help_text=f'open of {settings.OVERVIEW_REPORT_DURATION}',
         default=None
+    )
+    report_type = serializers.ChoiceField(
+        help_text='What type of report to create',
+        choices=['billing', 'accuracy'],
+        default="accuracy"
     )
\ No newline at end of file
diff --git a/cope2n-api/fwd_api/utils/accuracy.py b/cope2n-api/fwd_api/utils/accuracy.py
index 5651de3..12a37a9 100644
--- a/cope2n-api/fwd_api/utils/accuracy.py
+++ b/cope2n-api/fwd_api/utils/accuracy.py
@@ -797,6 +797,53 @@ def acc_maximize_list_values(acc):
                 pos[k] = acc[k].index(acc[k][0])
     return acc, pos
 
+
+def create_billing_data(subscription_requests):
+    billing_data = []
+    for request in subscription_requests:
+        if request.status != 200:
+            continue
+        images = SubscriptionRequestFile.objects.filter(request=request, file_category=FileCategory.Origin.value)
+        for image in images:
+            if not image.doc_type:
+                _doc_type = image.file_name.split("_")[1]
+                if _doc_type in ["imei", "invoice"]:
+                    image.doc_type = _doc_type
+                    image.save()
+            else:
+                _doc_type = image.doc_type
+
_doc_type == "imei" else "Invoice" + + _sub = "" + redemption_id = "" + if request.redemption_id: + _sub = map_subsidiary_short_to_long(request.redemption_id[:2]) + redemption_id = request.redemption_id + + format_to_time = '%m/%d/%Y %H:%M' + format_to_date = '%m/%d/%Y' + format_to_month = '%B %Y' + + rq_created_at = request.created_at + print(type(redemption_id)) + rq_created_at = timezone.make_aware(rq_created_at) + print(rq_created_at) + rq_month = rq_created_at.strftime(format_to_month) + rq_date = rq_created_at.strftime(format_to_date) + rq_time = rq_created_at.strftime(format_to_time) + + billing_data.append({ + "request_month": rq_month, + "subsidiary": _sub, + "image_type": doc_type, + "redemption_number": redemption_id, + "request_id": request.request_id, + "request_date": rq_date, + "request_time_(utc)": rq_time + }) + return billing_data + def calculate_a_request(report, request): request_att = {"acc": {"feedback": {"imei_number": [], "purchase_date": [], diff --git a/cope2n-api/fwd_api/utils/file.py b/cope2n-api/fwd_api/utils/file.py old mode 100644 new mode 100755 index aceccdc..71b175a --- a/cope2n-api/fwd_api/utils/file.py +++ b/cope2n-api/fwd_api/utils/file.py @@ -521,37 +521,58 @@ def dict2xlsx(input: json, _type='report'): } start_index = 4 + elif _type == 'billing_report': + wb = load_workbook(filename = 'billing_report.xlsx') + ws = wb['Sheet1'] + mapping = { + 'B': 'request_month', + 'C': 'subsidiary', + 'D': 'image_type', + 'E': 'redemption_number', + 'F': 'request_id', + 'G': "request_date", + 'H': "request_time_(utc)" + } + start_index = 4 + for subtotal in input: for key in mapping.keys(): - value = get_value(subtotal, mapping[key]) - ws[key + str(start_index)] = value - if key in ['C', 'D', 'E'] and value == 0: - ws[key + str(start_index)] = "-" - ws[key + str(start_index)].border = border - ws[key + str(start_index)].font = font_black - if 'accuracy' in mapping[key] or 'time' in mapping[key] or 'percent' in mapping[key] or 'speed' in mapping[key] or mapping[key] in ["review_progress"]: - ws[key + str(start_index)].number_format = '0.0' + if _type!="billing_report": + value = get_value(subtotal, mapping[key]) + ws[key + str(start_index)] = value + if key in ['C', 'D', 'E'] and value == 0: + ws[key + str(start_index)] = "-" + ws[key + str(start_index)].border = border + ws[key + str(start_index)].font = font_black + if 'accuracy' in mapping[key] or 'time' in mapping[key] or 'percent' in mapping[key] or 'speed' in mapping[key] or mapping[key] in ["review_progress"]: + ws[key + str(start_index)].number_format = '0.0' - if _type == 'report': - if subtotal['subs'] == '+': - ws[key + str(start_index)].font = font_black_bold - if key in ['A', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R']: - ws[key + str(start_index)].fill = fill_gray - elif key == 'B': - ws[key + str(start_index)].fill = fill_green - elif key in ['C', 'D', 'E', 'F', 'G', 'H']: - ws[key + str(start_index)].fill = fill_yellow - if 'average_accuracy_rate' in mapping[key] and type(value) in [int, float] and value < 98: + if _type == 'report': + if subtotal['subs'] == '+': + ws[key + str(start_index)].font = font_black_bold + if key in ['A', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R']: + ws[key + str(start_index)].fill = fill_gray + elif key == 'B': + ws[key + str(start_index)].fill = fill_green + elif key in ['C', 'D', 'E', 'F', 'G', 'H']: + ws[key + str(start_index)].fill = fill_yellow + if 'average_accuracy_rate' in mapping[key] and type(value) in [int, float] and value < 98: + ws[key + 
+                        ws[key + str(start_index)].font = font_red
+                    elif 'average_processing_time' in mapping[key] and type(value) in [int, float] and value > 2.0:
+                        ws[key + str(start_index)].font = font_red
+                    elif 'bad_percent' in mapping[key] and type(value) in [int, float] and value > 10:
+                        ws[key + str(start_index)].font = font_red
+                elif _type == 'report_detail':
+                    if 'accuracy' in mapping[key] and type(value) in [int, float] and value < 75:
+                        ws[key + str(start_index)].font = font_red
+                    elif 'speed' in mapping[key] and type(value) in [int, float] and value > 2.0:
+                        ws[key + str(start_index)].font = font_red
-                elif 'average_processing_time' in mapping[key] and type(value) in [int, float] and value > 2.0:
-                    ws[key + str(start_index)].font = font_red
-                elif 'bad_percent' in mapping[key] and type(value) in [int, float] and value > 10:
-                    ws[key + str(start_index)].font = font_red
-            elif _type == 'report_detail':
-                if 'accuracy' in mapping[key] and type(value) in [int, float] and value < 75:
-                    ws[key + str(start_index)].font = font_red
-                elif 'speed' in mapping[key] and type(value) in [int, float] and value > 2.0:
-                    ws[key + str(start_index)].font = font_red
+            else:
+                value = get_value(subtotal, mapping[key])
+                value = "-" if value == "" else value
+                ws[key + str(start_index)] = value
+                ws[key + str(start_index)].border = border
+                ws[key + str(start_index)].font = font_black
         start_index += 1
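
Reviewer note (not part of the patch): a minimal sketch of how the new report_type field would be exercised from a client. The route, host, token, and date field names are assumptions — the URL configuration is untouched by this patch — so adjust them to whatever endpoint actually fronts AccuracyViewSet.

import requests

# Hypothetical endpoint and credentials; replace with the real route and auth scheme.
MAKE_REPORT_URL = "http://localhost:8000/api/ctel/make_report/"
TOKEN = "<api-token>"

payload = {
    "start_date": "2024-02-01T00:00:00+0700",   # assumed names for the date-range fields
    "end_date": "2024-02-29T23:59:59+0700",
    "subsidiary": "some-subsidiary",            # illustrative; its short code must be in settings.SUB_FOR_BILLING
    "report_type": "billing",                   # new field added here; defaults to "accuracy" when omitted
}

response = requests.post(MAKE_REPORT_URL, json=payload,
                         headers={"Authorization": f"Bearer {TOKEN}"})
print(response.status_code, response.json())    # the response should carry the generated report_id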
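
For the same audience, the sketch below spells out the row shape produced by create_billing_data and the billing_report.xlsx column each key lands in via dict2xlsx(_type='billing_report'); the column letters and start row come from the mapping added in fwd_api/utils/file.py, while the sample values are invented for illustration.

# One element of the list returned by create_billing_data(); values are illustrative only.
sample_billing_row = {
    "request_month": "February 2024",           # column B, '%B %Y'
    "subsidiary": "subsidiary-long-name",       # column C, resolved via map_subsidiary_short_to_long
    "image_type": "SN/IMEI",                    # column D, "SN/IMEI" or "Invoice"
    "redemption_number": "redemption-id",       # column E, empty string when the request has none
    "request_id": "request-id",                 # column F
    "request_date": "02/06/2024",               # column G, '%m/%d/%Y'
    "request_time_(utc)": "02/06/2024 07:56",   # column H, '%m/%d/%Y %H:%M'
}
# dict2xlsx(_type='billing_report') writes these keys into columns B..H of the bundled
# billing_report.xlsx template, one image per row, starting at row 4 (start_index = 4).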