import json
import os
from datetime import datetime, timedelta

from flask import current_app
from sqlalchemy.orm.exc import NoResultFound

from app import aws_cloudwatch_client, notify_celery, redis_store
from app.clients.email import EmailClientNonRetryableException
from app.clients.email.aws_ses import AwsSesClientThrottlingSendRateException
from app.clients.sms import SmsClientResponseException
from app.config import QueueNames
from app.dao import notifications_dao
from app.dao.notifications_dao import (
    sanitize_successful_notification_by_id,
    update_notification_status_by_id,
)
from app.delivery import send_to_providers
from app.enums import NotificationStatus
from app.exceptions import NotificationTechnicalFailureException

# This is the amount of time to wait after sending an sms message before we check
# the aws logs and look for delivery receipts.
DELIVERY_RECEIPT_DELAY_IN_SECONDS = 120


@notify_celery.task(
    bind=True,
    name="check_sms_delivery_receipt",
    max_retries=48,
    default_retry_delay=300,
)
def check_sms_delivery_receipt(self, message_id, notification_id, sent_at):
    """
    This is called after deliver_sms to check the status of the message. It uses the same number of
    retries and the same retry delay as deliver_sms, and it first fires DELIVERY_RECEIPT_DELAY_IN_SECONDS
    (currently two minutes) after deliver_sms runs. The idea is that most messages will succeed and show
    up in the logs quickly. Other messages will resolve successfully after a retry or two. A few will
    fail, but it will take up to 4 hours to know for sure. The call to check_sms will raise an exception
    if neither a success nor a failure appears in the cloudwatch logs, so this task keeps retrying until
    the log appears, or until we run out of retries.
    """
    # TODO the localstack cloudwatch doesn't currently have our log groups. Possibly create them with awslocal?
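    # For example (untested assumption; the log group name below is a placeholder):
    #     awslocal logs create-log-group --log-group-name <our-sms-delivery-log-group>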
    if aws_cloudwatch_client.is_localstack():
        status = "success"
        provider_response = "this is a fake successful localstack sms message"
        carrier = "unknown"
    else:
        try:
            status, provider_response, carrier = aws_cloudwatch_client.check_sms(
                message_id, notification_id, sent_at
            )
        except NotificationTechnicalFailureException as ntfe:
            provider_response = "Unable to find carrier response -- still looking"
            status = "pending"
            carrier = ""
            update_notification_status_by_id(
                notification_id,
                status,
                carrier=carrier,
                provider_response=provider_response,
            )
            raise self.retry(exc=ntfe)

    if status == "success":
        status = NotificationStatus.DELIVERED
    elif status == "failure":
        status = NotificationStatus.FAILED
    # if status is not success or failure the client raised an exception and this method will retry

    if status == NotificationStatus.DELIVERED:
        sanitize_successful_notification_by_id(
            notification_id, carrier=carrier, provider_response=provider_response
        )
        current_app.logger.info(
            f"Sanitized notification {notification_id} that was successfully delivered"
        )
    else:
        update_notification_status_by_id(
            notification_id,
            status,
            carrier=carrier,
            provider_response=provider_response,
        )
        current_app.logger.info(
            f"Updated notification {notification_id} with response '{provider_response}'"
        )


@notify_celery.task(
    bind=True, name="deliver_sms", max_retries=48, default_retry_delay=300
)
def deliver_sms(self, notification_id):
    """
    Branch off to the final step in delivering the notification to SNS.

    Logic is in place for delivery receipts, plus additional logic to help
    devs see the authentication code in the terminal during development.
    """
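    # Flow: look up the notification, hand it to send_to_providers, then schedule
    # check_sms_delivery_receipt to poll cloudwatch for the delivery receipt.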
    try:
        current_app.logger.info(
            "Start sending SMS for notification id: {}".format(notification_id)
        )
        notification = notifications_dao.get_notification_by_id(notification_id)
        ansi_green = "\033[32m"
        ansi_reset = "\033[0m"

        if not notification:
            raise NoResultFound()
        if (
            os.getenv("NOTIFY_ENVIRONMENT") == "development"
            and "authentication code" in notification.content
        ):
            current_app.logger.warning(
                ansi_green + f"AUTHENTICATION CODE: {notification.content}" + ansi_reset
            )
        # Code branches off to send_to_providers.py
        message_id = send_to_providers.send_sms_to_provider(notification)
        # We have to put it in UTC. For other timezones, the delay
        # will be ignored and it will fire immediately (although this probably only affects developer testing)
        my_eta = datetime.utcnow() + timedelta(
            seconds=DELIVERY_RECEIPT_DELAY_IN_SECONDS
        )
        check_sms_delivery_receipt.apply_async(
            [message_id, notification_id, notification.created_at],
            eta=my_eta,
            queue=QueueNames.CHECK_SMS,
        )
    except Exception as e:
        update_notification_status_by_id(
            notification_id,
            NotificationStatus.TEMPORARY_FAILURE,
        )
        if isinstance(e, SmsClientResponseException):
            current_app.logger.warning(
                "SMS notification delivery for id: {} failed".format(notification_id),
                exc_info=True,
            )
        else:
            current_app.logger.exception(
                "SMS notification delivery for id: {} failed".format(notification_id)
            )

        try:
            if self.request.retries == 0:
                self.retry(queue=QueueNames.RETRY, countdown=0)
            else:
                self.retry(queue=QueueNames.RETRY)
        except self.MaxRetriesExceededError:
            message = (
                "RETRY FAILED: Max retries reached. The task send_sms_to_provider failed for notification {}. "
                "Notification has been updated to technical-failure".format(
                    notification_id
                )
            )
            update_notification_status_by_id(
                notification_id,
                NotificationStatus.TECHNICAL_FAILURE,
            )
            raise NotificationTechnicalFailureException(message)


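# Both deliver_sms above and deliver_email below are assumed to be enqueued by
# callers rather than invoked directly; a hedged sketch (queue names are
# illustrative and come from QueueNames in app.config):
#     deliver_sms.apply_async([str(notification.id)], queue=QueueNames.SEND_SMS)
#     deliver_email.apply_async([str(notification.id)], queue=QueueNames.SEND_EMAIL)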
@notify_celery.task(
    bind=True, name="deliver_email", max_retries=48, default_retry_delay=300
)
def deliver_email(self, notification_id):
    """
    Deliver an email notification: load it from the database, attach the
    personalisation cached in redis, and hand off to send_to_providers.
    Non-retryable client errors mark the notification technical-failure;
    anything else is retried until the retry budget is exhausted.
    """
    try:
        current_app.logger.info(
            "Start sending email for notification id: {}".format(notification_id)
        )
        notification = notifications_dao.get_notification_by_id(notification_id)

        if not notification:
            raise NoResultFound()
        personalisation = redis_store.get(f"email-personalisation-{notification_id}")

        notification.personalisation = json.loads(personalisation)
        send_to_providers.send_email_to_provider(notification)
    except EmailClientNonRetryableException as e:
        current_app.logger.exception(
            f"Email notification {notification_id} failed: {e}"
        )
        update_notification_status_by_id(
            notification_id, NotificationStatus.TECHNICAL_FAILURE
        )
    except Exception as e:
        try:
            if isinstance(e, AwsSesClientThrottlingSendRateException):
                current_app.logger.warning(
                    f"RETRY: Email notification {notification_id} was rate limited by SES"
                )
            else:
                current_app.logger.exception(
                    f"RETRY: Email notification {notification_id} failed"
                )

            self.retry(queue=QueueNames.RETRY)
        except self.MaxRetriesExceededError:
            message = (
                "RETRY FAILED: Max retries reached. "
                "The task send_email_to_provider failed for notification {}. "
                "Notification has been updated to technical-failure".format(
                    notification_id
                )
            )
            update_notification_status_by_id(
                notification_id,
                NotificationStatus.TECHNICAL_FAILURE,
            )
            raise NotificationTechnicalFailureException(message)


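# Producer-side contract assumed by deliver_email (a sketch, not verified here):
# the caller is expected to cache the personalisation as JSON before enqueueing, e.g.
#     redis_store.set(
#         f"email-personalisation-{notification_id}", json.dumps(personalisation)
#     )
# If the key is missing, json.loads(None) raises and deliver_email falls into its
# generic retry path.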