Delete job statistics tasks

These tasks are no longer used, so they can be deleted safely:
* record_initial_job_statistics
* record_outcome_job_statistics
* timeout-job-statistics

The test file for the statistics tasks was deleted in a previous commit.
Author: Katie Smith
Date: 2018-03-05 15:59:58 +00:00
Parent: 40a0b8f03c
Commit: 4f7dd1d258
3 changed files with 0 additions and 80 deletions
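
If the 'timeout-job-statistics' task was also wired into the Celery beat schedule (its name, and the scheduled tasks it sits alongside in the first diff below, suggest it was), the corresponding schedule entry has to be deleted as well. A hypothetical sketch of what such an entry typically looks like, assuming an old-style CELERYBEAT_SCHEDULE dict; the interval and queue name are illustrative and not taken from this codebase:

from datetime import timedelta

# Hypothetical beat schedule fragment: once timeout_job_statistics is gone,
# any entry like this that still points at it must also be removed, or beat
# keeps dispatching a task that no worker registers.
CELERYBEAT_SCHEDULE = {
    'timeout-job-statistics': {
        'task': 'timeout-job-statistics',   # the name= passed to @notify_celery.task
        'schedule': timedelta(minutes=10),  # illustrative interval
        'options': {'queue': 'periodic'},   # illustrative queue name
    },
}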

View File

@@ -39,7 +39,6 @@ from app.dao.notifications_dao import (
     dao_get_scheduled_notifications,
     set_scheduled_notification_to_processed,
 )
-from app.dao.statistics_dao import dao_timeout_job_statistics
 from app.dao.provider_details_dao import (
     get_current_provider,
     dao_toggle_sms_provider
@@ -282,15 +281,6 @@ def switch_current_sms_provider_on_slow_delivery():
         dao_toggle_sms_provider(current_provider.identifier)
 
 
-@notify_celery.task(name='timeout-job-statistics')
-@statsd(namespace="tasks")
-def timeout_job_statistics():
-    updated = dao_timeout_job_statistics(current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD'))
-    if updated:
-        current_app.logger.info(
-            "Timeout period reached for {} job statistics, failure count has been updated.".format(updated))
-
-
 @notify_celery.task(name="delete-inbound-sms")
 @statsd(namespace="tasks")
 def delete_inbound_sms_older_than_seven_days():

View File

@@ -1,62 +0,0 @@
-from celery.signals import worker_process_shutdown
-from notifications_utils.statsd_decorators import statsd
-from sqlalchemy.exc import SQLAlchemyError
-
-from app import notify_celery
-from flask import current_app
-
-from app.dao.statistics_dao import (
-    create_or_update_job_sending_statistics,
-    update_job_stats_outcome_count
-)
-from app.dao.notifications_dao import get_notification_by_id
-from app.config import QueueNames
-
-
-@worker_process_shutdown.connect
-def worker_process_shutdown(sender, signal, pid, exitcode):
-    current_app.logger.info('Statistics worker shutdown: PID: {} Exitcode: {}'.format(pid, exitcode))
-
-
-@notify_celery.task(bind=True, name='record_initial_job_statistics', max_retries=20, default_retry_delay=10)
-@statsd(namespace="tasks")
-def record_initial_job_statistics(self, notification_id):
-    notification = None
-    try:
-        notification = get_notification_by_id(notification_id)
-        if notification:
-            create_or_update_job_sending_statistics(notification)
-        else:
-            raise SQLAlchemyError("Failed to find notification with id {}".format(notification_id))
-    except SQLAlchemyError as e:
-        current_app.logger.exception(e)
-        self.retry(queue=QueueNames.RETRY)
-    except self.MaxRetriesExceededError:
-        current_app.logger.error(
-            "RETRY FAILED: task record_initial_job_statistics failed for notification {}".format(
-                notification.id if notification else "missing ID"
-            )
-        )
-
-
-@notify_celery.task(bind=True, name='record_outcome_job_statistics', max_retries=20, default_retry_delay=10)
-@statsd(namespace="tasks")
-def record_outcome_job_statistics(self, notification_id):
-    notification = None
-    try:
-        notification = get_notification_by_id(notification_id)
-        if notification:
-            updated_count = update_job_stats_outcome_count(notification)
-            if updated_count == 0:
-                self.retry(queue=QueueNames.RETRY)
-        else:
-            raise SQLAlchemyError("Failed to find notification with id {}".format(notification_id))
-    except SQLAlchemyError as e:
-        current_app.logger.exception(e)
-        self.retry(queue=QueueNames.RETRY)
-    except self.MaxRetriesExceededError:
-        current_app.logger.error(
-            "RETRY FAILED: task update_job_stats_outcome_count failed for notification {}".format(
-                notification.id if notification else "missing ID"
-            )
-        )
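
For context, both deleted tasks use the standard Celery bound-task retry idiom: bind=True exposes the task instance as self, self.retry() re-queues the work up to max_retries times, and MaxRetriesExceededError signals that the attempts are exhausted. A minimal, self-contained sketch of that same idiom with hypothetical names (example_app, persist_stat, record_stat, the 'retry' queue), not this codebase's actual helpers:

from celery import Celery
from celery.exceptions import MaxRetriesExceededError
from celery.utils.log import get_task_logger

example_app = Celery('example', broker='memory://')  # hypothetical app, standing in for notify_celery
logger = get_task_logger(__name__)


def persist_stat(record_id):
    # Hypothetical DAO call that fails transiently.
    raise RuntimeError('database unavailable')


@example_app.task(bind=True, max_retries=20, default_retry_delay=10)
def record_stat(self, record_id):
    try:
        persist_stat(record_id)
    except RuntimeError as exc:
        try:
            # Re-queues this task; after max_retries attempts it raises
            # MaxRetriesExceededError instead of scheduling another run.
            self.retry(exc=exc, queue='retry')
        except MaxRetriesExceededError:
            logger.error('RETRY FAILED: record_stat gave up on %s', record_id)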