from datetime import datetime, timedelta
from itertools import groupby
from operator import attrgetter

from botocore.exceptions import ClientError
from flask import current_app
from notifications_utils.international_billing_rates import (
    INTERNATIONAL_BILLING_RATES,
)
from notifications_utils.recipients import (
    InvalidEmailError,
    try_validate_and_format_phone_number,
    validate_and_format_email_address,
)
from notifications_utils.timezones import convert_bst_to_utc, convert_utc_to_bst
from sqlalchemy import and_, asc, desc, func, or_, union
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import functions
from sqlalchemy.sql.expression import case
from werkzeug.datastructures import MultiDict

from app import create_uuid, db, statsd_client
from app.dao.dao_utils import autocommit
from app.letters.utils import LetterPDFNotFound, find_letter_pdf_in_s3
from app.models import (
    EMAIL_TYPE,
    KEY_TYPE_NORMAL,
    KEY_TYPE_TEST,
    LETTER_TYPE,
    NOTIFICATION_CREATED,
    NOTIFICATION_DELIVERED,
    NOTIFICATION_PENDING,
    NOTIFICATION_PENDING_VIRUS_CHECK,
    NOTIFICATION_PERMANENT_FAILURE,
    NOTIFICATION_SENDING,
    NOTIFICATION_SENT,
    NOTIFICATION_STATUS_TYPES_COMPLETED,
    NOTIFICATION_TEMPORARY_FAILURE,
    SMS_TYPE,
    FactNotificationStatus,
    Notification,
    NotificationHistory,
    ProviderDetails,
)
from app.utils import (
    escape_special_characters,
    get_london_midnight_in_utc,
    midnight_n_days_ago,
)


def dao_get_last_date_template_was_used(template_id, service_id):
    last_date_from_notifications = db.session.query(
        functions.max(Notification.created_at)
    ).filter(
        Notification.service_id == service_id,
        Notification.template_id == template_id,
        Notification.key_type != KEY_TYPE_TEST
    ).scalar()

    if last_date_from_notifications:
        return last_date_from_notifications

    last_date = db.session.query(
        functions.max(FactNotificationStatus.bst_date)
    ).filter(
        FactNotificationStatus.template_id == template_id,
        FactNotificationStatus.key_type != KEY_TYPE_TEST
    ).scalar()

    return last_date


@autocommit
def dao_create_notification(notification):
    if not notification.id:
        # need to populate defaulted fields before we create the notification history object
        notification.id = create_uuid()
    if not notification.status:
        notification.status = NOTIFICATION_CREATED

    db.session.add(notification)


def country_records_delivery(phone_prefix):
    dlr = INTERNATIONAL_BILLING_RATES[phone_prefix]['attributes']['dlr']
    return dlr and dlr.lower() == 'yes'
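
# Illustrative behaviour of the lookup above (a sketch; the prefixes are examples and the result
# depends on the INTERNATIONAL_BILLING_RATES data shipped with notifications-utils):
#
#     country_records_delivery('44')  # True when the entry's attributes['dlr'] is 'yes'
#     country_records_delivery('93')  # falsy when that country's carriers send no delivery receipts
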

def _update_notification_status(notification, status, detailed_status_code=None):
    # Mapping detailed_status_code to a permanent/temporary failure is currently disabled:
    # status = _decide_permanent_temporary_failure(
    #     status=status, notification=notification, detailed_status_code=detailed_status_code
    # )
    notification.status = status
    dao_update_notification(notification)
    return notification


@autocommit
def update_notification_status_by_id(notification_id, status, sent_by=None, detailed_status_code=None):
    notification = Notification.query.with_for_update().filter(Notification.id == notification_id).first()

    if not notification:
        current_app.logger.info('notification not found for id {} (update to status {})'.format(
            notification_id,
            status
        ))
        return None

    if notification.status not in {
        NOTIFICATION_CREATED,
        NOTIFICATION_SENDING,
        NOTIFICATION_PENDING,
        NOTIFICATION_SENT,
        NOTIFICATION_PENDING_VIRUS_CHECK
    }:
        _duplicate_update_warning(notification, status)
        return None

    if (
        notification.notification_type == SMS_TYPE
        and notification.international
        and not country_records_delivery(notification.phone_prefix)
    ):
        return None

    if not notification.sent_by and sent_by:
        notification.sent_by = sent_by

    return _update_notification_status(
        notification=notification,
        status=status,
        detailed_status_code=detailed_status_code
    )


@autocommit
def update_notification_status_by_reference(reference, status):
    # this is used to update letters and emails
    notification = Notification.query.filter(Notification.reference == reference).first()

    if not notification:
        current_app.logger.error('notification not found for reference {} (update to {})'.format(reference, status))
        return None

    if notification.status not in {
        NOTIFICATION_SENDING,
        NOTIFICATION_PENDING
    }:
        _duplicate_update_warning(notification, status)
        return None

    return _update_notification_status(
        notification=notification,
        status=status
    )


@autocommit
def dao_update_notification(notification):
    notification.updated_at = datetime.utcnow()
    db.session.add(notification)


def get_notifications_for_job(service_id, job_id, filter_dict=None, page=1, page_size=None):
    if page_size is None:
        page_size = current_app.config['PAGE_SIZE']
    query = Notification.query.filter_by(service_id=service_id, job_id=job_id)
    query = _filter_query(query, filter_dict)
    return query.order_by(asc(Notification.job_row_number)).paginate(
        page=page,
        per_page=page_size
    )


def dao_get_notification_count_for_job_id(*, job_id):
    return Notification.query.filter_by(job_id=job_id).count()


def get_notification_with_personalisation(service_id, notification_id, key_type):
    filter_dict = {'service_id': service_id, 'id': notification_id}
    if key_type:
        filter_dict['key_type'] = key_type

    return Notification.query.filter_by(**filter_dict).options(joinedload('template')).one()


def get_notification_by_id(notification_id, service_id=None, _raise=False):
    filters = [Notification.id == notification_id]

    if service_id:
        filters.append(Notification.service_id == service_id)

    query = Notification.query.filter(*filters)

    return query.one() if _raise else query.first()


def get_notifications_for_service(
    service_id,
    filter_dict=None,
    page=1,
    page_size=None,
    count_pages=True,
    limit_days=None,
    key_type=None,
    personalisation=False,
    include_jobs=False,
    include_from_test_key=False,
    older_than=None,
    client_reference=None,
    include_one_off=True,
    error_out=True
):
    if page_size is None:
        page_size = current_app.config['PAGE_SIZE']

    filters = [Notification.service_id == service_id]

    if limit_days is not None:
        filters.append(Notification.created_at >= midnight_n_days_ago(limit_days))

    if older_than is not None:
        older_than_created_at = db.session.query(
            Notification.created_at).filter(Notification.id == older_than).as_scalar()
        filters.append(Notification.created_at < older_than_created_at)

    if not include_jobs:
        filters.append(Notification.job_id == None)  # noqa

    if not include_one_off:
        filters.append(Notification.created_by_id == None)  # noqa

    if key_type is not None:
        filters.append(Notification.key_type == key_type)
    elif not include_from_test_key:
        filters.append(Notification.key_type != KEY_TYPE_TEST)

    if client_reference is not None:
        filters.append(Notification.client_reference == client_reference)

    query = Notification.query.filter(*filters)
    query = _filter_query(query, filter_dict)
    if personalisation:
        query = query.options(
            joinedload('template')
        )

    return query.order_by(desc(Notification.created_at)).paginate(
        page=page,
        per_page=page_size,
        count=count_pages,
        error_out=error_out,
    )
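
# Illustrative usage (a sketch, not a production code path; some_service_id is hypothetical and a
# Flask app context with PAGE_SIZE configured is assumed):
#
#     pagination = get_notifications_for_service(
#         some_service_id,
#         limit_days=7,
#         key_type=KEY_TYPE_NORMAL,
#         include_jobs=True,
#     )
#     for notification in pagination.items:
#         ...  # pagination.has_next / pagination.next_num drive fetching older pages
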

def _filter_query(query, filter_dict=None):
    if filter_dict is None:
        return query

    multidict = MultiDict(filter_dict)

    # filter by status
    statuses = multidict.getlist('status')
    if statuses:
        statuses = Notification.substitute_status(statuses)
        query = query.filter(Notification.status.in_(statuses))

    # filter by template
    template_types = multidict.getlist('template_type')
    if template_types:
        query = query.filter(Notification.notification_type.in_(template_types))

    return query
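
# Illustrative filter_dict accepted by _filter_query above (a sketch; the values are examples, and
# 'status' entries may be concrete statuses or aliases that Notification.substitute_status expands):
#
#     filter_dict = {'status': ['delivered', 'failed'], 'template_type': ['sms']}
#     query = _filter_query(Notification.query, filter_dict)
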

@autocommit
def insert_notification_history_delete_notifications(
    notification_type, service_id, timestamp_to_delete_backwards_from, qry_limit=50000
):
    """
    Delete up to 50,000 notifications that are past retention for a notification type and service.

    Steps are as follows:

    Create a temporary notifications table
    Populate that table with up to 50k notifications that are to be deleted. (Note: no specified order)
    Insert everything in the temp table into notification history
    Delete from notifications if notification id is in the temp table
    Drop the temp table (automatically when the transaction commits)

    Temporary tables are in a separate postgres schema, and only visible to the current session
    (db connection; in a celery task there's one connection per thread).
    """
    # The default query limit of 50,000 takes about 48 seconds on the current table size;
    # 10,000 took 11s and 100,000 took 1 min 30 seconds.
    select_into_temp_table = """
        CREATE TEMP TABLE NOTIFICATION_ARCHIVE ON COMMIT DROP AS
        SELECT id, job_id, job_row_number, service_id, template_id, template_version, api_key_id,
            key_type, notification_type, created_at, sent_at, sent_by, updated_at, reference, billable_units,
            client_reference, international, phone_prefix, rate_multiplier, notification_status,
            created_by_id, postage, document_download_count
        FROM notifications
        WHERE service_id = :service_id
        AND notification_type = :notification_type
        AND created_at < :timestamp_to_delete_backwards_from
        AND key_type in ('normal', 'team')
        limit :qry_limit
    """
    select_into_temp_table_for_letters = """
        CREATE TEMP TABLE NOTIFICATION_ARCHIVE ON COMMIT DROP AS
        SELECT id, job_id, job_row_number, service_id, template_id, template_version, api_key_id,
            key_type, notification_type, created_at, sent_at, sent_by, updated_at, reference, billable_units,
            client_reference, international, phone_prefix, rate_multiplier, notification_status,
            created_by_id, postage, document_download_count
        FROM notifications
        WHERE service_id = :service_id
        AND notification_type = :notification_type
        AND created_at < :timestamp_to_delete_backwards_from
        AND notification_status NOT IN ('pending-virus-check', 'created', 'sending')
        AND key_type in ('normal', 'team')
        limit :qry_limit
    """
    # Insert into NotificationHistory; if the row already exists, do nothing.
    insert_query = """
        insert into notification_history
        SELECT * from NOTIFICATION_ARCHIVE
        ON CONFLICT ON CONSTRAINT notification_history_pkey
        DO NOTHING
    """
    delete_query = """
        DELETE FROM notifications
        where id in (select id from NOTIFICATION_ARCHIVE)
    """
    input_params = {
        "service_id": service_id,
        "notification_type": notification_type,
        "timestamp_to_delete_backwards_from": timestamp_to_delete_backwards_from,
        "qry_limit": qry_limit
    }

    select_to_use = select_into_temp_table_for_letters if notification_type == 'letter' else select_into_temp_table
    db.session.execute(select_to_use, input_params)

    result = db.session.execute("select count(*) from NOTIFICATION_ARCHIVE").fetchone()[0]

    db.session.execute(insert_query)
    db.session.execute(delete_query)

    return result


def move_notifications_to_notification_history(
    notification_type,
    service_id,
    timestamp_to_delete_backwards_from,
    qry_limit=50000
):
    deleted = 0

    if notification_type == LETTER_TYPE:
        _delete_letters_from_s3(
            notification_type, service_id, timestamp_to_delete_backwards_from, qry_limit
        )

    delete_count_per_call = 1
    while delete_count_per_call > 0:
        delete_count_per_call = insert_notification_history_delete_notifications(
            notification_type=notification_type,
            service_id=service_id,
            timestamp_to_delete_backwards_from=timestamp_to_delete_backwards_from,
            qry_limit=qry_limit
        )
        deleted += delete_count_per_call

    # Delete test notifications directly; they are not persisted to NotificationHistory.
    Notification.query.filter(
        Notification.notification_type == notification_type,
        Notification.service_id == service_id,
        Notification.created_at < timestamp_to_delete_backwards_from,
        Notification.key_type == KEY_TYPE_TEST
    ).delete(synchronize_session=False)
    db.session.commit()

    return deleted


def _delete_letters_from_s3(
    notification_type, service_id, date_to_delete_from, query_limit
):
    letters_to_delete_from_s3 = db.session.query(
        Notification
    ).filter(
        Notification.notification_type == notification_type,
        Notification.created_at < date_to_delete_from,
        Notification.service_id == service_id,
        # although letters in non-completed statuses do have PDFs in s3, they do not exist in the
        # production-letters-pdf bucket as they never made it that far, so we do not try to delete
        # them from it
        Notification.status.in_(NOTIFICATION_STATUS_TYPES_COMPLETED)
    ).limit(query_limit).all()
    for letter in letters_to_delete_from_s3:
        try:
            letter_pdf = find_letter_pdf_in_s3(letter)
            letter_pdf.delete()
        except ClientError:
            current_app.logger.exception(
                "Error deleting S3 object for letter: {}".format(letter.id))
        except LetterPDFNotFound:
            current_app.logger.warning(
                "No S3 object to delete for letter: {}".format(letter.id))


@autocommit
def dao_delete_notifications_by_id(notification_id):
    db.session.query(Notification).filter(
        Notification.id == notification_id
    ).delete(synchronize_session='fetch')


def dao_timeout_notifications(cutoff_time, limit=100000):
    """
    Set email and SMS notifications (only) to "temporary-failure" status
    if they're still sending from before the specified cutoff_time.
    """
    updated_at = datetime.utcnow()
    current_statuses = [NOTIFICATION_SENDING, NOTIFICATION_PENDING]
    new_status = NOTIFICATION_TEMPORARY_FAILURE

    notifications = Notification.query.filter(
        Notification.created_at < cutoff_time,
        Notification.status.in_(current_statuses),
        Notification.notification_type.in_([SMS_TYPE, EMAIL_TYPE])
    ).limit(limit).all()

    Notification.query.filter(
        Notification.id.in_([n.id for n in notifications]),
    ).update(
        {'status': new_status, 'updated_at': updated_at},
        synchronize_session=False
    )

    db.session.commit()
    return notifications
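
# Illustrative usage (a sketch; the four-hour window is a hypothetical value and would normally
# come from app configuration):
#
#     cutoff = datetime.utcnow() - timedelta(hours=4)
#     timed_out = dao_timeout_notifications(cutoff)
#     current_app.logger.info('Timed out {} notifications'.format(len(timed_out)))
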

def is_delivery_slow_for_providers(
    created_at,
    threshold,
    delivery_time,
):
    """
    Returns a dict of providers and whether each is currently slow, e.g.:
    {
        'mmg': True,
        'firetext': False
    }
    """
    slow_notification_counts = db.session.query(
        ProviderDetails.identifier,
        case(
            [(
                Notification.status == NOTIFICATION_DELIVERED,
                (Notification.updated_at - Notification.sent_at) >= delivery_time
            )],
            else_=(datetime.utcnow() - Notification.sent_at) >= delivery_time
        ).label("slow"),
        func.count().label('count')
    ).select_from(
        ProviderDetails
    ).outerjoin(
        Notification, and_(
            Notification.notification_type == SMS_TYPE,
            Notification.sent_by == ProviderDetails.identifier,
            Notification.created_at >= created_at,
            Notification.sent_at.isnot(None),
            Notification.status.in_([NOTIFICATION_DELIVERED, NOTIFICATION_PENDING, NOTIFICATION_SENDING]),
            Notification.key_type != KEY_TYPE_TEST
        )
    ).filter(
        ProviderDetails.notification_type == 'sms',
        ProviderDetails.active
    ).order_by(
        ProviderDetails.identifier
    ).group_by(
        ProviderDetails.identifier,
        "slow"
    )

    slow_providers = {}
    for provider, rows in groupby(slow_notification_counts, key=attrgetter('identifier')):
        rows = list(rows)
        total_notifications = sum(row.count for row in rows)
        slow_notifications = sum(row.count for row in rows if row.slow)

        slow_providers[provider] = (slow_notifications / total_notifications >= threshold)
        statsd_client.gauge(f'slow-delivery.{provider}.ratio', slow_notifications / total_notifications)

    return slow_providers
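
# Illustrative usage (a sketch with hypothetical window and threshold values):
#
#     slow = is_delivery_slow_for_providers(
#         created_at=datetime.utcnow() - timedelta(minutes=10),
#         threshold=0.3,
#         delivery_time=timedelta(minutes=4),
#     )
#     # e.g. {'mmg': False, 'firetext': True}: a provider counts as slow when at least 30% of its
#     # recent SMS took 4 minutes or longer to deliver (or are still undelivered after that long).
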

@autocommit
def dao_update_notifications_by_reference(references, update_dict):
    updated_count = Notification.query.filter(
        Notification.reference.in_(references)
    ).update(
        update_dict,
        synchronize_session=False
    )

    updated_history_count = 0
    if updated_count != len(references):
        updated_history_count = NotificationHistory.query.filter(
            NotificationHistory.reference.in_(references)
        ).update(
            update_dict,
            synchronize_session=False
        )

    return updated_count, updated_history_count


def dao_get_notifications_by_recipient_or_reference(
    service_id,
    search_term,
    notification_type=None,
    statuses=None,
    page=1,
    page_size=None,
    error_out=True,
):
    if notification_type == SMS_TYPE:
        normalised = try_validate_and_format_phone_number(search_term)

        for character in {'(', ')', ' ', '-'}:
            normalised = normalised.replace(character, '')

        normalised = normalised.lstrip('+0')

    elif notification_type == EMAIL_TYPE:
        try:
            normalised = validate_and_format_email_address(search_term)
        except InvalidEmailError:
            normalised = search_term.lower()

    elif notification_type in {LETTER_TYPE, None}:
        # For letters, we store the address without spaces, so we need
        # to remove spaces from the search term to match. We also do
        # this when a notification type isn’t provided (this will
        # happen if a user doesn’t have permission to see the dashboard)
        # because email addresses and phone numbers will never be stored
        # with spaces either.
        normalised = ''.join(search_term.split()).lower()

    else:
        raise TypeError(
            f'Notification type must be {EMAIL_TYPE}, {SMS_TYPE}, {LETTER_TYPE} or None'
        )

    normalised = escape_special_characters(normalised)
    search_term = escape_special_characters(search_term)

    filters = [
        Notification.service_id == service_id,
        or_(
            Notification.normalised_to.like("%{}%".format(normalised)),
            Notification.client_reference.ilike("%{}%".format(search_term)),
        ),
        Notification.key_type != KEY_TYPE_TEST,
    ]

    if statuses:
        filters.append(Notification.status.in_(statuses))
    if notification_type:
        filters.append(Notification.notification_type == notification_type)

    results = db.session.query(Notification)\
        .filter(*filters)\
        .order_by(desc(Notification.created_at))\
        .paginate(page=page, per_page=page_size, count=False, error_out=error_out)
    return results
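
# Illustrative normalisation (a sketch; the number is a made-up example): searching an SMS
# service for '+44 (0)7700 900123' strips the formatting characters and leading '+'/'0's above,
# so the LIKE clause matches against something like '447700900123' in normalised_to.
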

def dao_get_notification_by_reference(reference):
    return Notification.query.filter(
        Notification.reference == reference
    ).one()


def dao_get_notification_or_history_by_reference(reference):
    try:
        # This try/except is necessary because notifications sent with test keys or in research
        # mode do not create notification history rows. Otherwise we could just search for the
        # NotificationHistory object.
        return Notification.query.filter(
            Notification.reference == reference
        ).one()
    except NoResultFound:
        return NotificationHistory.query.filter(
            NotificationHistory.reference == reference
        ).one()


def dao_get_notifications_processing_time_stats(start_date, end_date):
    """
    For a given time range, returns the number of notifications sent and the number of
    those notifications that we processed within 10 seconds

    SELECT
        count(notifications),
        coalesce(sum(CASE WHEN sent_at - created_at <= interval '10 seconds' THEN 1 ELSE 0 END), 0)
    FROM notifications
    WHERE
        created_at > 'START DATE' AND
        created_at < 'END DATE' AND
        api_key_id IS NOT NULL AND
        key_type != 'test' AND
        notification_type != 'letter';
    """
    under_10_secs = Notification.sent_at - Notification.created_at <= timedelta(seconds=10)
    sum_column = functions.coalesce(functions.sum(
        case(
            [
                (under_10_secs, 1)
            ],
            else_=0
        )
    ), 0)

    return db.session.query(
        func.count(Notification.id).label('messages_total'),
        sum_column.label('messages_within_10_secs')
    ).filter(
        Notification.created_at >= start_date,
        Notification.created_at < end_date,
        Notification.api_key_id.isnot(None),
        Notification.key_type != KEY_TYPE_TEST,
        Notification.notification_type != LETTER_TYPE
    ).one()
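
# Illustrative usage (a sketch; the dates are hypothetical). The returned row exposes
# messages_total and messages_within_10_secs, from which callers derive a percentage:
#
#     stats = dao_get_notifications_processing_time_stats(
#         start_date=datetime(2021, 3, 1), end_date=datetime(2021, 3, 2)
#     )
#     percentage = 100 * stats.messages_within_10_secs / stats.messages_total if stats.messages_total else 100
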

def dao_get_last_notification_added_for_job_id(job_id):
    last_notification_added = Notification.query.filter(
        Notification.job_id == job_id
    ).order_by(
        Notification.job_row_number.desc()
    ).first()

    return last_notification_added


def notifications_not_yet_sent(should_be_sending_after_seconds, notification_type):
    older_than_date = datetime.utcnow() - timedelta(seconds=should_be_sending_after_seconds)

    notifications = Notification.query.filter(
        Notification.created_at <= older_than_date,
        Notification.notification_type == notification_type,
        Notification.status == NOTIFICATION_CREATED
    ).all()
    return notifications


def dao_get_letters_to_be_printed(print_run_deadline, postage, query_limit=10000):
    """
    Return all letters created before the print run deadline that have not yet been sent. This yields in batches of 10k
    to prevent the query taking too long and eating up too much memory. As each 10k batch is yielded, the
    get_key_and_size_of_letters_to_be_sent_to_print function will go and fetch the s3 data, and these start sending off
    tasks to the notify-ftp app to send them.

    CAUTION! Modify this query with caution. Modifying filters etc is fine, but if we join onto another table, then
    there may be undefined behaviour. Essentially we need each ORM object returned for each row to be unique,
    and we should avoid modifying state of returned objects.

    For more reading:
    https://docs.sqlalchemy.org/en/13/orm/query.html?highlight=yield_per#sqlalchemy.orm.query.Query.yield_per
    https://www.mail-archive.com/sqlalchemy@googlegroups.com/msg12443.html
    """
    notifications = Notification.query.filter(
        Notification.created_at < convert_bst_to_utc(print_run_deadline),
        Notification.notification_type == LETTER_TYPE,
        Notification.status == NOTIFICATION_CREATED,
        Notification.key_type == KEY_TYPE_NORMAL,
        Notification.postage == postage,
        Notification.billable_units > 0
    ).order_by(
        Notification.service_id,
        Notification.created_at
    ).yield_per(query_limit)
    return notifications
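
# Illustrative iteration (a sketch; the deadline value and postage are hypothetical). Because the
# query uses yield_per, rows stream in batches of query_limit instead of being loaded all at once:
#
#     for letter in dao_get_letters_to_be_printed(print_run_deadline, postage='second'):
#         ...  # read-only access; avoid mutating ORM objects returned by a yield_per query
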

def dao_get_letters_and_sheets_volume_by_postage(print_run_deadline):
    notifications = db.session.query(
        func.count(Notification.id).label('letters_count'),
        func.sum(Notification.billable_units).label('sheets_count'),
        Notification.postage
    ).filter(
        Notification.created_at < convert_bst_to_utc(print_run_deadline),
        Notification.notification_type == LETTER_TYPE,
        Notification.status == NOTIFICATION_CREATED,
        Notification.key_type == KEY_TYPE_NORMAL,
        Notification.billable_units > 0
    ).group_by(
        Notification.postage
    ).order_by(
        Notification.postage
    ).all()
    return notifications


def dao_old_letters_with_created_status():
    yesterday_bst = convert_utc_to_bst(datetime.utcnow()) - timedelta(days=1)
    last_processing_deadline = yesterday_bst.replace(hour=17, minute=30, second=0, microsecond=0)

    notifications = Notification.query.filter(
        Notification.created_at < convert_bst_to_utc(last_processing_deadline),
        Notification.notification_type == LETTER_TYPE,
        Notification.status == NOTIFICATION_CREATED
    ).order_by(
        Notification.created_at
    ).all()
    return notifications


def letters_missing_from_sending_bucket(seconds_to_subtract):
    older_than_date = datetime.utcnow() - timedelta(seconds=seconds_to_subtract)
    # A processed letter gets an updated_at timestamp and billable units greater than zero;
    # these are still `created` with neither set, so their PDFs are missing from the sending bucket.
    notifications = Notification.query.filter(
        Notification.billable_units == 0,
        Notification.updated_at == None,  # noqa
        Notification.status == NOTIFICATION_CREATED,
        Notification.created_at <= older_than_date,
        Notification.notification_type == LETTER_TYPE,
        Notification.key_type == KEY_TYPE_NORMAL
    ).order_by(
        Notification.created_at
    ).all()

    return notifications


def dao_precompiled_letters_still_pending_virus_check():
    ninety_minutes_ago = datetime.utcnow() - timedelta(seconds=5400)

    notifications = Notification.query.filter(
        Notification.created_at < ninety_minutes_ago,
        Notification.status == NOTIFICATION_PENDING_VIRUS_CHECK
    ).order_by(
        Notification.created_at
    ).all()
    return notifications


def _duplicate_update_warning(notification, status):
    current_app.logger.info(
        (
            'Duplicate callback received for service {service_id}. '
            'Notification ID {id} with type {type} sent by {sent_by}. '
            'New status was {new_status}, current status is {old_status}. '
            'This happened {time_diff} after being first set.'
        ).format(
            id=notification.id,
            old_status=notification.status,
            new_status=status,
            time_diff=datetime.utcnow() - (notification.updated_at or notification.created_at),
            type=notification.notification_type,
            sent_by=notification.sent_by,
            service_id=notification.service_id
        )
    )


def get_service_ids_with_notifications_before(notification_type, timestamp):
    return {
        row.service_id
        for row in db.session.query(
            Notification.service_id
        ).filter(
            Notification.notification_type == notification_type,
            Notification.created_at < timestamp
        ).distinct()
    }


def get_service_ids_with_notifications_on_date(notification_type, date):
    start_date = get_london_midnight_in_utc(date)
    end_date = get_london_midnight_in_utc(date + timedelta(days=1))

    notification_table_query = db.session.query(
        Notification.service_id.label('service_id')
    ).filter(
        Notification.notification_type == notification_type,
        # using >= + < is much more efficient than date(created_at)
        Notification.created_at >= start_date,
        Notification.created_at < end_date,
    )

    # Looking at this table is more efficient for historical notifications,
    # provided the task to populate it has run before they were archived.
    ft_status_table_query = db.session.query(
        FactNotificationStatus.service_id.label('service_id')
    ).filter(
        FactNotificationStatus.notification_type == notification_type,
        FactNotificationStatus.bst_date == date,
    )

    return {
        row.service_id for row in db.session.query(union(
            notification_table_query, ft_status_table_query
        ).subquery()).distinct()
    }