Revert running status aggregation in parallel

The top-level task didn't run successfully after this was deployed:
the worker was killed because of heavy disk usage. While the more
parallel version does log much more, that alone doesn't fully explain
the disk behaviour. Nonetheless, reverting it is sensible, to give us
the time we need to investigate further.
Ben Thorner
2022-01-20 10:39:23 +00:00
parent 0a88724ff5
commit 0f6dea0deb
3 changed files with 44 additions and 45 deletions
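Below is a minimal sketch of the shape this revert restores, to help read the diff that follows. It is an assumption-laden illustration, not the Notify codebase: the in-memory fact_table list, the dict-shaped notifications, and the helper names are all hypothetical stand-ins. The point is that one aggregation pass covers every service for the day, and each aggregated row carries its own service_id, rather than fanning out one aggregation per service and passing service_id alongside the rows.

# Sketch only: hypothetical stand-ins for the real DAO and tables.
from collections import Counter
from dataclasses import dataclass

NO_JOB_ID = '00000000-0000-0000-0000-000000000000'

@dataclass(frozen=True)
class StatusRow:
    template_id: str
    service_id: str
    job_id: str
    key_type: str
    status: str

def aggregate_status_for_day(notifications):
    # Mirrors the GROUP BY (template_id, service_id, job_id, key_type, status)
    # in the query below, including coalescing a missing job_id to a zero UUID.
    counts = Counter(
        StatusRow(
            template_id=n['template_id'],
            service_id=n['service_id'],
            job_id=n['job_id'] or NO_JOB_ID,
            key_type=n['key_type'],
            status=n['status'],
        )
        for n in notifications
    )
    return list(counts.items())

def update_fact_notification_status(fact_table, new_status_rows, process_day, notification_type):
    # Reverted shape: no service_id argument. Delete the whole day's rows for
    # this notification type, then re-insert using each row's own service_id.
    fact_table[:] = [
        r for r in fact_table
        if not (r['bst_date'] == process_day and r['notification_type'] == notification_type)
    ]
    for row, count in new_status_rows:
        fact_table.append({
            'bst_date': process_day,
            'notification_type': notification_type,
            'template_id': row.template_id,
            'service_id': row.service_id,  # from the row, not a per-service argument
            'job_id': row.job_id,
            'key_type': row.key_type,
            'status': row.status,
            'notification_count': count,
        })

The parallel version being reverted did the opposite: it filtered the aggregation to a single service and called the update once per service with that service_id, which is the shape the diff below removes.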


@@ -45,6 +45,7 @@ def fetch_status_data_for_service_and_day(process_day, service_id, notification_
     return db.session.query(
         table.template_id,
+        table.service_id,
         func.coalesce(table.job_id, '00000000-0000-0000-0000-000000000000').label('job_id'),
         table.key_type,
         table.status,
@@ -57,6 +58,7 @@ def fetch_status_data_for_service_and_day(process_day, service_id, notification_
         table.key_type.in_((KEY_TYPE_NORMAL, KEY_TYPE_TEAM)),
     ).group_by(
         table.template_id,
+        table.service_id,
         'job_id',
         table.key_type,
         table.status
@@ -64,12 +66,11 @@ def fetch_status_data_for_service_and_day(process_day, service_id, notification_
 @autocommit
-def update_fact_notification_status(new_status_rows, process_day, notification_type, service_id):
+def update_fact_notification_status(new_status_rows, process_day, notification_type):
     table = FactNotificationStatus.__table__
     FactNotificationStatus.query.filter(
         FactNotificationStatus.bst_date == process_day,
         FactNotificationStatus.notification_type == notification_type,
-        FactNotificationStatus.service_id == service_id,
     ).delete()
     for row in new_status_rows:
@@ -77,7 +78,7 @@ def update_fact_notification_status(new_status_rows, process_day, notification_t
         insert(table).values(
             bst_date=process_day,
             template_id=row.template_id,
-            service_id=service_id,
+            service_id=row.service_id,
             job_id=row.job_id,
             notification_type=notification_type,
             key_type=row.key_type,