code review feedback and merge from main

Kenneth Kehl
2024-09-11 09:39:18 -07:00
34 changed files with 256 additions and 161 deletions
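The recurring change across these files is mechanical: current_app.logger.error(..., exc_info=True) becomes current_app.logger.exception(...), and redundant exc_info=True arguments are dropped from existing logger.exception(...) calls. A minimal sketch of why the two spellings are equivalent inside an except block (the logger name and message below are illustrative only, not from this commit):

    import logging

    logger = logging.getLogger(__name__)

    try:
        1 / 0
    except ZeroDivisionError:
        # Both lines log at ERROR level with the active traceback attached;
        # logger.exception() is shorthand for logger.error(..., exc_info=True)
        # and is meant to be called only from an exception handler.
        logger.error("division failed", exc_info=True)
        logger.exception("division failed")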

View File

@@ -54,9 +54,8 @@ def cleanup_unfinished_jobs():
         try:
             acceptable_finish_time = job.processing_started + timedelta(minutes=5)
         except TypeError:
-            current_app.logger.error(
+            current_app.logger.exception(
                 f"Job ID {job.id} processing_started is {job.processing_started}.",
-                exc_info=True,
             )
             raise
         if now > acceptable_finish_time:
@@ -194,9 +193,7 @@ def delete_inbound_sms():
             )
         )
     except SQLAlchemyError:
-        current_app.logger.exception(
-            "Failed to delete inbound sms notifications", exc_info=True
-        )
+        current_app.logger.exception("Failed to delete inbound sms notifications")
         raise

View File

@@ -114,7 +114,7 @@ def process_ses_results(self, response):
         raise
     except Exception:
-        current_app.logger.exception("Error processing SES results", exc_info=True)
+        current_app.logger.exception("Error processing SES results")
         self.retry(queue=QueueNames.RETRY)
@@ -206,7 +206,7 @@ def handle_complaint(ses_message):
         reference = ses_message["mail"]["messageId"]
     except KeyError:
         current_app.logger.exception(
-            "Complaint from SES failed to get reference from message", exc_info=True
+            "Complaint from SES failed to get reference from message"
         )
         return
     notification = dao_get_notification_history_by_reference(reference)

View File

@@ -144,12 +144,10 @@ def deliver_sms(self, notification_id):
         if isinstance(e, SmsClientResponseException):
             current_app.logger.warning(
                 "SMS notification delivery for id: {} failed".format(notification_id),
-                exc_info=True,
             )
         else:
             current_app.logger.exception(
                 "SMS notification delivery for id: {} failed".format(notification_id),
-                exc_info=True,
             )
         try:
@@ -188,9 +186,7 @@ def deliver_email(self, notification_id):
         notification.personalisation = json.loads(personalisation)
         send_to_providers.send_email_to_provider(notification)
     except EmailClientNonRetryableException:
-        current_app.logger.exception(
-            f"Email notification {notification_id} failed", exc_info=True
-        )
+        current_app.logger.exception(f"Email notification {notification_id} failed")
         update_notification_status_by_id(notification_id, "technical-failure")
     except Exception as e:
         try:
@@ -200,7 +196,7 @@ def deliver_email(self, notification_id):
             )
         else:
             current_app.logger.exception(
-                f"RETRY: Email notification {notification_id} failed", exc_info=True
+                f"RETRY: Email notification {notification_id} failed"
             )
             self.retry(queue=QueueNames.RETRY)
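The deliver_sms and deliver_email hunks above sit inside the usual Celery bound-task retry flow: log the failure, then hand the task back to a retry queue. A generic sketch of that flow, using a plain Celery shared_task rather than the project's notify_celery decorator; send_to_provider and the queue name are placeholders:

    from celery import shared_task
    from flask import current_app

    @shared_task(bind=True, max_retries=3)
    def deliver_example(self, notification_id):
        try:
            send_to_provider(notification_id)  # placeholder for the real provider call
        except Exception:
            # logger.exception records the traceback, so no exc_info=True is needed
            current_app.logger.exception(f"RETRY: delivery of {notification_id} failed")
            # re-queue; raises self.MaxRetriesExceededError once retries are exhausted
            self.retry(queue="retry-tasks")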

View File

@@ -46,7 +46,7 @@ def run_scheduled_jobs():
                 "Job ID {} added to process job queue".format(job.id)
             )
     except SQLAlchemyError:
-        current_app.logger.exception("Failed to run scheduled jobs", exc_info=True)
+        current_app.logger.exception("Failed to run scheduled jobs")
         raise
@@ -61,7 +61,7 @@ def delete_verify_codes():
             )
         )
     except SQLAlchemyError:
-        current_app.logger.exception("Failed to delete verify codes", exc_info=True)
+        current_app.logger.exception("Failed to delete verify codes")
         raise
@@ -74,7 +74,7 @@ def expire_or_delete_invitations():
             f"Expire job started {start} finished {utc_now()} expired {expired_invites} invitations"
         )
     except SQLAlchemyError:
-        current_app.logger.exception("Failed to expire invitations", exc_info=True)
+        current_app.logger.exception("Failed to expire invitations")
         raise
     try:
@@ -84,7 +84,7 @@ def expire_or_delete_invitations():
             f"Delete job started {start} finished {utc_now()} deleted {deleted_invites} invitations"
         )
     except SQLAlchemyError:
-        current_app.logger.exception("Failed to delete invitations", exc_info=True)
+        current_app.logger.exception("Failed to delete invitations")
         raise
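Each scheduled-task hunk above keeps the same shape: wrap the database work in try/except SQLAlchemyError, log via logger.exception (which captures the traceback on its own), and re-raise so the task still fails visibly. A minimal sketch of that shape; dao_delete_expired_rows is a hypothetical stand-in for the real DAO calls:

    from flask import current_app
    from sqlalchemy.exc import SQLAlchemyError

    def delete_expired_rows():
        try:
            dao_delete_expired_rows()  # hypothetical DAO helper, not from this commit
        except SQLAlchemyError:
            # exc_info=True would be redundant; logger.exception adds it implicitly
            current_app.logger.exception("Failed to delete expired rows")
            raise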

View File

@@ -158,11 +158,10 @@ def __total_sending_limits_for_job_exceeded(service, job, job_id):
job.job_status = "sending limits exceeded"
job.processing_finished = utc_now()
dao_update_job(job)
current_app.logger.error(
current_app.logger.exception(
"Job {} size {} error. Total sending limits {} exceeded".format(
job_id, job.notification_count, service.message_limit
),
exc_info=True,
)
return True
@@ -361,9 +360,8 @@ def save_api_email_or_sms(self, encrypted_notification):
try:
self.retry(queue=QueueNames.RETRY)
except self.MaxRetriesExceededError:
current_app.logger.error(
current_app.logger.exception(
f"Max retry failed Failed to persist notification {notification['id']}",
exc_info=True,
)
@@ -379,11 +377,11 @@ def handle_exception(task, notification, notification_id, exc):
# SQLAlchemy is throwing a FlushError. So we check if the notification id already exists then do not
# send to the retry queue.
# This probably (hopefully) is not an issue with Redis as the celery backing store
current_app.logger.exception("Retry" + retry_msg, exc_info=True)
current_app.logger.exception("Retry" + retry_msg)
try:
task.retry(queue=QueueNames.RETRY, exc=exc)
except task.MaxRetriesExceededError:
current_app.logger.error("Max retry failed" + retry_msg, exc_info=True)
current_app.logger.exception("Max retry failed" + retry_msg)
@notify_celery.task(
@@ -432,10 +430,9 @@ def send_inbound_sms_to_service(self, inbound_sms_id, service_id):
try:
self.retry(queue=QueueNames.RETRY)
except self.MaxRetriesExceededError:
current_app.logger.error(
current_app.logger.exception(
"Retry: send_inbound_sms_to_service has retried the max number of"
+ f"times for service: {service_id} and inbound_sms {inbound_sms_id}",
exc_info=True,
+ f"times for service: {service_id} and inbound_sms {inbound_sms_id}"
)
else:
current_app.logger.warning(
@@ -449,6 +446,11 @@ def regenerate_job_cache():
s3.get_s3_files()
@notify_celery.task(name="delete-old-s3-objects")
def delete_old_s3_objects():
s3.cleanup_old_s3_objects()
@notify_celery.task(name="process-incomplete-jobs")
def process_incomplete_jobs(job_ids):
jobs = [dao_get_job_by_id(job_id) for job_id in job_ids]
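The newly added delete-old-s3-objects task only runs if something triggers it on a schedule. A hedged sketch of how such a periodic trigger is typically declared with Celery beat, assuming notify_celery is a standard Celery app instance; the interval and schedule key below are illustrative, not taken from this commit:

    from celery.schedules import crontab

    notify_celery.conf.beat_schedule = {
        "delete-old-s3-objects": {
            # task name matches the @notify_celery.task(name=...) decorator above
            "task": "delete-old-s3-objects",
            # illustrative interval: once a day at 03:00 UTC
            "schedule": crontab(hour=3, minute=0),
        },
    }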