import json
from datetime import datetime, timedelta

from flask import current_app
from sqlalchemy import between, select, union
from sqlalchemy.exc import SQLAlchemyError

from app import db, get_zendesk_client, notify_celery, redis_store
from app.celery.tasks import (
    get_recipient_csv_and_template_and_sender_id,
    process_incomplete_jobs,
    process_job,
    process_row,
)
from app.clients.cloudwatch.aws_cloudwatch import AwsCloudwatchClient
from app.config import QueueNames
from app.dao.invited_org_user_dao import (
    delete_org_invitations_created_more_than_two_days_ago,
)
from app.dao.invited_user_dao import expire_invitations_created_more_than_two_days_ago
from app.dao.jobs_dao import (
    dao_set_scheduled_jobs_to_pending,
    dao_update_job_status_to_error,
    find_jobs_with_missing_rows,
    find_missing_row_for_job,
)
from app.dao.notifications_dao import (
    dao_batch_insert_notifications,
    dao_close_out_delivery_receipts,
    dao_update_delivery_receipts,
    notifications_not_yet_sent,
)
from app.dao.services_dao import (
    dao_find_services_sending_to_tv_numbers,
    dao_find_services_with_high_failure_rates,
)
from app.dao.users_dao import delete_codes_older_created_more_than_a_day_ago
from app.enums import JobStatus, NotificationType
from app.models import Job, Notification
from app.notifications.process_notifications import send_notification_to_queue
from app.utils import utc_now
from notifications_utils import aware_utcnow
from notifications_utils.clients.zendesk.zendesk_client import NotifySupportTicket

MAX_NOTIFICATION_FAILS = 10000

zendesk_client = get_zendesk_client()


@notify_celery.task(name="run-scheduled-jobs")
|
|
|
|
|
def run_scheduled_jobs():
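    """Set scheduled jobs to pending and put each one on the job-processing queue."""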
    try:
        for job in dao_set_scheduled_jobs_to_pending():
            process_job.apply_async([str(job.id)], queue=QueueNames.JOBS)
            current_app.logger.info(
                "Job ID {} added to process job queue".format(job.id)
            )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to run scheduled jobs")
        raise


@notify_celery.task(name="delete-verify-codes")
|
|
|
|
|
def delete_verify_codes():
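    """Delete verify codes created more than a day ago and log how many were removed."""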
    try:
        start = utc_now()
        deleted = delete_codes_older_created_more_than_a_day_ago()
        current_app.logger.info(
            "Delete job started {} finished {} deleted {} verify codes".format(
                start, utc_now(), deleted
            )
        )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to delete verify codes")
        raise


@notify_celery.task(name="expire-or-delete-invitations")
|
|
|
|
|
def expire_or_delete_invitations():
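    """
    Expire user invitations and delete organization invitations that were created
    more than two days ago.
    """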
    try:
        start = utc_now()
        expired_invites = expire_invitations_created_more_than_two_days_ago()
        current_app.logger.info(
            f"Expire job started {start} finished {utc_now()} expired {expired_invites} invitations"
        )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to expire invitations")
        raise

    try:
        start = utc_now()
        deleted_invites = delete_org_invitations_created_more_than_two_days_ago()
        current_app.logger.info(
            f"Delete job started {start} finished {utc_now()} deleted {deleted_invites} invitations"
        )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to delete invitations")
        raise


@notify_celery.task(name="check-job-status")
|
2017-10-12 16:21:08 +01:00
|
|
|
def check_job_status():
|
|
|
|
|
"""
|
|
|
|
|
every x minutes do this check
|
|
|
|
|
select
|
|
|
|
|
from jobs
|
|
|
|
|
where job_status == 'in progress'
|
2024-12-27 10:13:28 -08:00
|
|
|
and processing started some time ago
|
2021-03-17 14:53:34 +00:00
|
|
|
OR where the job_status == 'pending'
|
2024-12-27 10:13:28 -08:00
|
|
|
and the job scheduled_for timestamp is some time ago.
|
2017-10-12 16:21:08 +01:00
|
|
|
if any results then
|
2021-03-17 14:53:34 +00:00
|
|
|
update the job_status to 'error'
|
2017-10-12 16:23:28 +01:00
|
|
|
process the rows in the csv that are missing (in another task) just do the check here.
|
2017-10-12 16:21:08 +01:00
|
|
|
"""
|
2024-12-27 10:13:28 -08:00
|
|
|
START_MINUTES = 245
|
|
|
|
|
END_MINUTES = 240
|
|
|
|
|
end_minutes_ago = utc_now() - timedelta(minutes=END_MINUTES)
|
|
|
|
|
start_minutes_ago = utc_now() - timedelta(minutes=START_MINUTES)
|
2017-10-12 16:21:08 +01:00
|
|
|
|
2024-12-20 08:09:19 -08:00
|
|
|
incomplete_in_progress_jobs = select(Job).where(
|
2024-11-15 09:34:49 -08:00
|
|
|
Job.job_status == JobStatus.IN_PROGRESS,
|
2024-12-27 10:13:28 -08:00
|
|
|
between(Job.processing_started, start_minutes_ago, end_minutes_ago),
|
2021-03-17 14:53:34 +00:00
|
|
|
)
|
2024-12-20 08:09:19 -08:00
|
|
|
incomplete_pending_jobs = select(Job).where(
|
2024-11-15 09:34:49 -08:00
|
|
|
Job.job_status == JobStatus.PENDING,
|
|
|
|
|
Job.scheduled_for.isnot(None),
|
2024-12-27 10:13:28 -08:00
|
|
|
between(Job.scheduled_for, start_minutes_ago, end_minutes_ago),
|
2021-03-17 14:53:34 +00:00
|
|
|
)
|
2024-12-30 09:30:31 -08:00
|
|
|
jobs_not_completed_after_allotted_time = union(
|
|
|
|
|
incomplete_in_progress_jobs, incomplete_pending_jobs
|
|
|
|
|
)
|
|
|
|
|
jobs_not_completed_after_allotted_time = (
|
|
|
|
|
jobs_not_completed_after_allotted_time.order_by(
|
|
|
|
|
Job.processing_started, Job.scheduled_for
|
|
|
|
|
)
|
2023-08-29 14:54:30 -07:00
|
|
|
)
|
2017-10-12 16:21:08 +01:00
|
|
|
|
2025-01-14 12:01:46 -08:00
|
|
|
jobs_not_complete_after_allotted_time = db.session.execute(
|
|
|
|
|
jobs_not_completed_after_allotted_time
|
|
|
|
|
).all()
|
2024-12-30 09:30:31 -08:00
|
|
|
|
2018-03-09 16:34:47 +00:00
|
|
|
# temporarily mark them as ERROR so that they don't get picked up by future check_job_status tasks
|
|
|
|
|
# if they haven't been re-processed in time.
|
|
|
|
|
job_ids = []
|
2024-12-27 10:13:28 -08:00
|
|
|
for job in jobs_not_complete_after_allotted_time:
|
2024-12-30 11:45:14 -08:00
|
|
|
dao_update_job_status_to_error(job)
|
2018-03-09 16:34:47 +00:00
|
|
|
job_ids.append(str(job.id))
|
2017-10-12 16:21:08 +01:00
|
|
|
if job_ids:
|
2020-07-22 17:00:20 +01:00
|
|
|
current_app.logger.info("Job(s) {} have not completed.".format(job_ids))
|
2023-08-29 14:54:30 -07:00
|
|
|
process_incomplete_jobs.apply_async([job_ids], queue=QueueNames.JOBS)
|
2017-11-09 10:32:39 +00:00
|
|
|
|
|
|
|
|
|
2023-08-29 14:54:30 -07:00
|
|
|
@notify_celery.task(name="replay-created-notifications")
|
2018-03-23 15:38:35 +00:00
|
|
|
def replay_created_notifications():
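    """Re-queue email and sms notifications that are still in the 'created' state."""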
    # If a notification has not been sent after one hour, try to resend it.
    resend_created_notifications_older_than = 60 * 60
    for notification_type in (NotificationType.EMAIL, NotificationType.SMS):
        notifications_to_resend = notifications_not_yet_sent(
            resend_created_notifications_older_than, notification_type
        )

        if len(notifications_to_resend) > 0:
            current_app.logger.info(
                "Sending {} {} notifications "
                "to the delivery queue because the notification "
                "status was created.".format(
                    len(notifications_to_resend), notification_type
                )
            )

        for n in notifications_to_resend:
            send_notification_to_queue(notification=n)


@notify_celery.task(name="check-for-missing-rows-in-completed-jobs")
|
2019-11-05 16:47:00 +00:00
|
|
|
def check_for_missing_rows_in_completed_jobs():
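    """
    Find completed jobs that have missing rows and process each missing row from the
    original csv.
    """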
    jobs = find_jobs_with_missing_rows()
    for job in jobs:
        (
            recipient_csv,
            template,
            sender_id,
        ) = get_recipient_csv_and_template_and_sender_id(job)
        missing_rows = find_missing_row_for_job(job.id, job.notification_count)
        for row_to_process in missing_rows:
            row = recipient_csv[row_to_process.missing_row]
            current_app.logger.info(
                f"Processing missing row: {row_to_process.missing_row} for job: {job.id}"
            )
            process_row(row, template, job, job.service, sender_id=sender_id)


@notify_celery.task(
    name="check-for-services-with-high-failure-rates-or-sending-to-tv-numbers"
)
def check_for_services_with_high_failure_rates_or_sending_to_tv_numbers():
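    """
    Look back over the last 24 hours for services with high sms permanent-failure
    rates, or services sending many sms to tv numbers, log a warning for any found,
    and raise a Zendesk ticket in live-like environments.
    """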
    start_date = utc_now() - timedelta(days=1)
    end_date = utc_now()
    message = ""

    services_with_failures = dao_find_services_with_high_failure_rates(
        start_date=start_date, end_date=end_date
    )
    services_sending_to_tv_numbers = dao_find_services_sending_to_tv_numbers(
        start_date=start_date, end_date=end_date
    )

    if services_with_failures:
        message += "{} service(s) have had high permanent-failure rates for sms messages in last 24 hours:\n".format(
            len(services_with_failures)
        )
        for service in services_with_failures:
            service_dashboard = "{}/services/{}".format(
                current_app.config["ADMIN_BASE_URL"],
                str(service.service_id),
            )
            message += "service: {} failure rate: {},\n".format(
                service_dashboard, service.permanent_failure_rate
            )
    elif services_sending_to_tv_numbers:
        message += "{} service(s) have sent over 500 sms messages to tv numbers in last 24 hours:\n".format(
            len(services_sending_to_tv_numbers)
        )
        for service in services_sending_to_tv_numbers:
            service_dashboard = "{}/services/{}".format(
                current_app.config["ADMIN_BASE_URL"],
                str(service.service_id),
            )
            message += "service: {} count of sms to tv numbers: {},\n".format(
                service_dashboard, service.notification_count
            )

    if services_with_failures or services_sending_to_tv_numbers:
        current_app.logger.warning(message)

        if current_app.config["NOTIFY_ENVIRONMENT"] in ["live", "production", "test"]:
            message += (
                "\nYou can find instructions for this ticket in our manual:\n"
                "https://github.com/alphagov/notifications-manuals/wiki/Support-Runbook#Deal-with-services-with-high-failure-rates-or-sending-sms-to-tv-numbers"  # noqa
            )
            ticket = NotifySupportTicket(
                subject=f"[{current_app.config['NOTIFY_ENVIRONMENT']}] High failure rates for sms spotted for services",
                message=message,
                ticket_type=NotifySupportTicket.TYPE_INCIDENT,
                technical_ticket=True,
            )
            zendesk_client.send_ticket_to_zendesk(ticket)


@notify_celery.task(
    bind=True, max_retries=7, default_retry_delay=3600, name="process-delivery-receipts"
)
def process_delivery_receipts(self):
    """
    Every eight minutes or so (see config.py) we run this task, which searches a recent
    window of CloudWatch logs for delivery receipts and batch-updates the db with the
    results. Overlap between runs is intentional: re-updating a receipt is harmless and
    better than losing data.

    We also retry with exponential backoff in case of failure. The only way this should
    fail is if, for example, the db went down or redis filled up and the app stopped
    processing. If that happens, we need to come back over those receipts once things
    are running again.
    """
    # If we need to check db settings, do it here for convenience:
    # current_app.logger.info(f"POOL SIZE {app.db.engine.pool.size()}")
    try:
        batch_size = 1000  # in theory with postgresql this could be 10k to 20k?

        cloudwatch = AwsCloudwatchClient()
        cloudwatch.init_app(current_app)
        start_time = aware_utcnow() - timedelta(minutes=3)
        end_time = aware_utcnow()
        delivered_receipts, failed_receipts = cloudwatch.check_delivery_receipts(
            start_time, end_time
        )
        delivered_receipts = list(delivered_receipts)
        for i in range(0, len(delivered_receipts), batch_size):
            batch = delivered_receipts[i : i + batch_size]
            dao_update_delivery_receipts(batch, True)
        failed_receipts = list(failed_receipts)
        for i in range(0, len(failed_receipts), batch_size):
            batch = failed_receipts[i : i + batch_size]
            dao_update_delivery_receipts(batch, False)
    except Exception as ex:
        retry_count = self.request.retries
        wait_time = 3600 * 2**retry_count

        current_app.logger.exception(str(ex))

        try:
            raise self.retry(ex=ex, countdown=wait_time)
        except self.MaxRetriesExceededError:
            current_app.logger.error(
                "Failed to process delivery receipts after max retries"
            )


@notify_celery.task(
    bind=True, max_retries=2, default_retry_delay=3600, name="cleanup-delivery-receipts"
)
def cleanup_delivery_receipts(self):
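    """Close out delivery receipts via dao_close_out_delivery_receipts()."""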
    dao_close_out_delivery_receipts()


@notify_celery.task(bind=True, name="batch-insert-notifications")
|
|
|
|
|
def batch_insert_notifications(self):
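    """
    Drain the redis 'message_queue' list, build Notification objects from each entry,
    and batch-insert them into the db. If the insert fails, push anything that is not
    yet stale back onto the queue to be retried.
    """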
    batch = []

    current_len = redis_store.llen("message_queue")
    with redis_store.pipeline():
        # Since this list is being fed by other processes, just grab what is available
        # when this call is made and process that.
        count = 0
        while count < current_len:
            count = count + 1
            notification_bytes = redis_store.lpop("message_queue")
            notification_dict = json.loads(notification_bytes.decode("utf-8"))
            notification_dict["status"] = notification_dict.pop("notification_status")
            if not notification_dict.get("created_at"):
                notification_dict["created_at"] = utc_now()
            elif isinstance(notification_dict["created_at"], list):
                notification_dict["created_at"] = notification_dict["created_at"][0]
            notification = Notification(**notification_dict)
            # notify-api-749 do not write to db.
            # If we have a verify_code we know this is the authentication notification at
            # login time and not a csv (containing PII) provided by the user, so allow
            # verify_code to continue to exist.
            if notification is None:
                continue
            if "verify_code" not in str(notification.personalisation):
                batch.append(notification)
    try:
        dao_batch_insert_notifications(batch)
    except Exception:
        current_app.logger.exception("Notification batch insert failed")
        for n in batch:
            # Use 'created_at' as a TTL so we don't retry infinitely
            notification_time = n.created_at
            if isinstance(notification_time, str):
                notification_time = datetime.fromisoformat(n.created_at)
            if notification_time < utc_now() - timedelta(seconds=50):
                current_app.logger.warning(
                    f"Abandoning stale data, could not write to db: {n.serialize_for_redis(n)}"
                )
                continue
            else:
                redis_store.rpush("message_queue", json.dumps(n.serialize_for_redis(n)))