# notifications-api/app/notifications/process_notifications.py

import uuid
from datetime import datetime
from flask import current_app
from notifications_utils.clients import redis
from notifications_utils.recipients import (
    get_international_phone_info,
    validate_and_format_phone_number,
    format_email_address
)
from notifications_utils.timezones import convert_bst_to_utc, convert_utc_to_bst
from app import redis_store
from app.celery import provider_tasks
from app.celery.letters_pdf_tasks import create_letters_pdf
from app.config import QueueNames
from app.models import (
    EMAIL_TYPE,
    KEY_TYPE_TEST,
    SMS_TYPE,
    LETTER_TYPE,
    NOTIFICATION_CREATED,
    Notification,
    ScheduledNotification,
    CHOOSE_POSTAGE
)
from app.dao.notifications_dao import (
    dao_create_notification,
    dao_delete_notifications_and_history_by_id,
    dao_created_scheduled_notification
)
from app.dao.templates_dao import dao_get_template_by_id
from app.v2.errors import BadRequestError
from app.utils import (
    cache_key_for_service_template_counter,
    cache_key_for_service_template_usage_per_day,
    get_template_instance,
)


def create_content_for_notification(template, personalisation):
    template_object = get_template_instance(template.__dict__, personalisation)
    check_placeholders(template_object)
    return template_object


def check_placeholders(template_object):
    if template_object.missing_data:
        message = 'Missing personalisation: {}'.format(", ".join(template_object.missing_data))
        raise BadRequestError(fields=[{'template': message}], message=message)
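
# For example (illustrative only): rendering a template whose body contains
# "((name))" with a personalisation dict that has no 'name' key would raise
#   BadRequestError(fields=[{'template': 'Missing personalisation: name'}],
#                   message='Missing personalisation: name')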


def persist_notification(
    *,
    template_id,
    template_version,
    recipient,
    service,
    personalisation,
    notification_type,
    api_key_id,
    key_type,
    created_at=None,
    job_id=None,
    job_row_number=None,
    reference=None,
    client_reference=None,
    notification_id=None,
    simulated=False,
    created_by_id=None,
    status=NOTIFICATION_CREATED,
    reply_to_text=None,
    billable_units=None
):
    notification_created_at = created_at or datetime.utcnow()
    if not notification_id:
        notification_id = uuid.uuid4()
    notification = Notification(
        id=notification_id,
        template_id=template_id,
        template_version=template_version,
        to=recipient,
        service_id=service.id,
        service=service,
        personalisation=personalisation,
        notification_type=notification_type,
        api_key_id=api_key_id,
        key_type=key_type,
        created_at=notification_created_at,
        job_id=job_id,
        job_row_number=job_row_number,
        client_reference=client_reference,
        reference=reference,
        created_by_id=created_by_id,
        status=status,
        reply_to_text=reply_to_text,
        billable_units=billable_units
    )

    if notification_type == SMS_TYPE:
        formatted_recipient = validate_and_format_phone_number(recipient, international=True)
        recipient_info = get_international_phone_info(formatted_recipient)
        notification.normalised_to = formatted_recipient
        notification.international = recipient_info.international
        notification.phone_prefix = recipient_info.country_prefix
        notification.rate_multiplier = recipient_info.billable_units
    elif notification_type == EMAIL_TYPE:
        notification.normalised_to = format_email_address(notification.to)
    elif notification_type == LETTER_TYPE:
        template = dao_get_template_by_id(template_id, template_version)
        if service.has_permission(CHOOSE_POSTAGE) and template.postage:
            notification.postage = template.postage
        else:
            notification.postage = service.postage

    # if simulated, create a Notification model to return but do not persist the Notification to the DB
    if not simulated:
        dao_create_notification(notification)
        if key_type != KEY_TYPE_TEST:
            # only increment these counters if the key already exists - incrementing
            # a missing key would recreate it with a partial count
            if redis_store.get(redis.daily_limit_cache_key(service.id)):
                redis_store.incr(redis.daily_limit_cache_key(service.id))
            if redis_store.get_all_from_hash(cache_key_for_service_template_counter(service.id)):
                redis_store.increment_hash_value(cache_key_for_service_template_counter(service.id), template_id)
            increment_template_usage_cache(service.id, template_id, notification_created_at)

        current_app.logger.info(
            "{} {} created at {}".format(notification_type, notification_id, notification_created_at)
        )
    return notification
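
# Illustrative call, e.g. from an API endpoint (names and values made up):
#   notification = persist_notification(
#       template_id=template.id,
#       template_version=template.version,
#       recipient='+447700900123',
#       service=service,
#       personalisation=None,
#       notification_type=SMS_TYPE,
#       api_key_id=api_key.id,
#       key_type=api_key.key_type,
#   )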


def increment_template_usage_cache(service_id, template_id, created_at):
    """Increment the count for this template in the per-day usage hash.

    Template usage used to live in a single short-lived redis hash, but hincrby
    silently recreates an empty hash once the key has expired, and we can't
    tell when that has happened. Instead there is one key per service per BST
    day (<service_id>-template-usage-<YYYY-MM-DD>, so it lines up with the
    ft_billing table), which is only ever incremented from here and summed over
    the last eight days at read time. A missing key is still ambiguous - it may
    mean the service has sent nothing that day, or that the key was deleted -
    so after any redis data loss the counts have to be rebuilt.
    """
    key = cache_key_for_service_template_usage_per_day(service_id, convert_utc_to_bst(created_at))
    redis_store.increment_hash_value(key, template_id)
    # set the key to expire in eight days - we don't know whether we've just created
    # the key, so must assume we have and reset the expiry. Eight days is longer than
    # any notification stays in the notifications table, so we'll always capture the
    # full week's numbers
    redis_store.expire(key, current_app.config['EXPIRE_CACHE_EIGHT_DAYS'])
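
# A sketch of the read side described above - not in this module, just an
# illustration of summing the last eight days of per-day hashes. Assumes
# redis_store.get_all_from_hash returns a dict (or None) for each daily key:
#
#   from datetime import timedelta
#
#   def get_template_usage_for_last_week(service_id):
#       counts = {}
#       today = convert_utc_to_bst(datetime.utcnow())
#       for day_offset in range(8):
#           key = cache_key_for_service_template_usage_per_day(
#               service_id, today - timedelta(days=day_offset)
#           )
#           day_counts = redis_store.get_all_from_hash(key) or {}
#           for tmpl_id, count in day_counts.items():
#               counts[tmpl_id] = counts.get(tmpl_id, 0) + int(count)
#       return counts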


def send_notification_to_queue(notification, research_mode, queue=None):
    if research_mode or notification.key_type == KEY_TYPE_TEST:
        queue = QueueNames.RESEARCH_MODE

    if notification.notification_type == SMS_TYPE:
        if not queue:
            queue = QueueNames.SEND_SMS
        deliver_task = provider_tasks.deliver_sms
    if notification.notification_type == EMAIL_TYPE:
        if not queue:
            queue = QueueNames.SEND_EMAIL
        deliver_task = provider_tasks.deliver_email
    if notification.notification_type == LETTER_TYPE:
        if not queue:
            queue = QueueNames.CREATE_LETTERS_PDF
        deliver_task = create_letters_pdf

    try:
        deliver_task.apply_async([str(notification.id)], queue=queue)
    except Exception:
        # the notification can never be delivered if the task didn't reach the
        # queue, so clean up the DB row before re-raising
        dao_delete_notifications_and_history_by_id(notification.id)
        raise

    current_app.logger.debug(
        "{} {} sent to the {} queue for delivery".format(notification.notification_type,
                                                         notification.id,
                                                         queue))


def simulated_recipient(to_address, notification_type):
    if notification_type == SMS_TYPE:
        formatted_simulated_numbers = [
            validate_and_format_phone_number(number) for number in current_app.config['SIMULATED_SMS_NUMBERS']
        ]
        return to_address in formatted_simulated_numbers
    else:
        return to_address in current_app.config['SIMULATED_EMAIL_ADDRESSES']
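
# SIMULATED_SMS_NUMBERS and SIMULATED_EMAIL_ADDRESSES come from app config; a
# config might reserve a few test recipients along the lines of (illustrative):
#   SIMULATED_SMS_NUMBERS = ('+447700900000', '+447700900111')
#   SIMULATED_EMAIL_ADDRESSES = ('simulate-delivered@notifications.service.gov.uk',)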


def persist_scheduled_notification(notification_id, scheduled_for):
    scheduled_datetime = convert_bst_to_utc(datetime.strptime(scheduled_for, "%Y-%m-%d %H:%M"))
    scheduled_notification = ScheduledNotification(notification_id=notification_id,
                                                   scheduled_for=scheduled_datetime)
    dao_created_scheduled_notification(scheduled_notification)
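
# Illustrative usage - scheduled_for is a BST wall-clock string, converted to
# UTC before storage:
#   persist_scheduled_notification(notification.id, '2018-03-30 14:00')
#   (stored as 2018-03-30 13:00 UTC while BST is in effect)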