# notifications-api/app/dao/services_dao.py
import uuid
from datetime import date, datetime, timedelta
from flask import current_app
from sqlalchemy import Float, cast
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import and_, asc, case, func
from app import db
from app.dao.dao_utils import VersionOptions, autocommit, version_class
from app.dao.date_util import get_current_financial_year
from app.dao.organisation_dao import dao_get_organisation_by_email_address
from app.dao.service_sms_sender_dao import insert_service_sms_sender
from app.dao.service_user_dao import dao_get_service_user
from app.dao.template_folder_dao import dao_get_valid_template_folders_by_id
from app.models import (
EMAIL_TYPE,
INTERNATIONAL_LETTERS,
INTERNATIONAL_SMS_TYPE,
KEY_TYPE_TEST,
LETTER_TYPE,
NOTIFICATION_PERMANENT_FAILURE,
SMS_TYPE,
UPLOAD_LETTERS,
AnnualBilling,
ApiKey,
FactBilling,
InboundNumber,
InvitedUser,
Job,
Notification,
NotificationHistory,
Organisation,
Permission,
Service,
ServiceContactList,
ServiceEmailReplyTo,
ServiceLetterContact,
ServicePermission,
ServiceSmsSender,
Template,
TemplateHistory,
TemplateRedacted,
User,
VerifyCode,
)
from app.utils import (
escape_special_characters,
get_archived_db_column_value,
get_london_midnight_in_utc,
)
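# permission types granted to every new service unless the caller supplies its own list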
DEFAULT_SERVICE_PERMISSIONS = [
SMS_TYPE,
EMAIL_TYPE,
LETTER_TYPE,
INTERNATIONAL_SMS_TYPE,
UPLOAD_LETTERS,
INTERNATIONAL_LETTERS,
]
def dao_fetch_all_services(only_active=False):
query = Service.query.order_by(
asc(Service.created_at)
).options(
joinedload('users')
)
if only_active:
query = query.filter(Service.active)
return query.all()
def get_services_by_partial_name(service_name):
service_name = escape_special_characters(service_name)
return Service.query.filter(Service.name.ilike("%{}%".format(service_name))).all()
def dao_count_live_services():
return Service.query.filter_by(
active=True,
restricted=False,
count_as_live=True,
).count()
def dao_fetch_live_services_data():
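    """
    Return one row per live service with its go-live contact details, volume
    intents and this financial year's notification totals, joined to its most
    recent annual billing row. Rows are folded together per service below.
    """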
year_start_date, year_end_date = get_current_financial_year()
most_recent_annual_billing = db.session.query(
AnnualBilling.service_id,
func.max(AnnualBilling.financial_year_start).label('year')
).group_by(
AnnualBilling.service_id
).subquery()
this_year_ft_billing = FactBilling.query.filter(
FactBilling.bst_date >= year_start_date,
FactBilling.bst_date <= year_end_date,
).subquery()
data = db.session.query(
Service.id.label('service_id'),
Service.name.label("service_name"),
Organisation.name.label("organisation_name"),
Organisation.organisation_type.label('organisation_type'),
Service.consent_to_research.label('consent_to_research'),
User.name.label('contact_name'),
User.email_address.label('contact_email'),
User.mobile_number.label('contact_mobile'),
Service.go_live_at.label("live_date"),
Service.volume_sms.label('sms_volume_intent'),
Service.volume_email.label('email_volume_intent'),
Service.volume_letter.label('letter_volume_intent'),
case([
(this_year_ft_billing.c.notification_type == 'email', func.sum(this_year_ft_billing.c.notifications_sent))
], else_=0).label("email_totals"),
case([
(this_year_ft_billing.c.notification_type == 'sms', func.sum(this_year_ft_billing.c.notifications_sent))
], else_=0).label("sms_totals"),
case([
(this_year_ft_billing.c.notification_type == 'letter', func.sum(this_year_ft_billing.c.notifications_sent))
], else_=0).label("letter_totals"),
AnnualBilling.free_sms_fragment_limit,
).join(
Service.annual_billing
).join(
most_recent_annual_billing,
and_(
Service.id == most_recent_annual_billing.c.service_id,
AnnualBilling.financial_year_start == most_recent_annual_billing.c.year
)
).outerjoin(
Service.organisation
).outerjoin(
this_year_ft_billing, Service.id == this_year_ft_billing.c.service_id
).outerjoin(
User, Service.go_live_user_id == User.id
).filter(
Service.count_as_live.is_(True),
Service.active.is_(True),
Service.restricted.is_(False),
).group_by(
Service.id,
Organisation.name,
Organisation.organisation_type,
Service.name,
Service.consent_to_research,
Service.count_as_live,
Service.go_live_user_id,
User.name,
User.email_address,
User.mobile_number,
Service.go_live_at,
Service.volume_sms,
Service.volume_email,
Service.volume_letter,
this_year_ft_billing.c.notification_type,
AnnualBilling.free_sms_fragment_limit,
).order_by(
asc(Service.go_live_at)
).all()
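    # the query returns a row per (service, notification_type); fold these into
    # a single dict per service, summing the email/sms/letter totals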
results = []
for row in data:
existing_service = next((x for x in results if x['service_id'] == row.service_id), None)
if existing_service is not None:
existing_service["email_totals"] += row.email_totals
existing_service["sms_totals"] += row.sms_totals
existing_service["letter_totals"] += row.letter_totals
else:
results.append(row._asdict())
return results
def dao_fetch_service_by_id(service_id, only_active=False):
query = Service.query.filter_by(
id=service_id
).options(
joinedload('users')
)
if only_active:
query = query.filter(Service.active)
return query.one()
def dao_fetch_service_by_inbound_number(number):
inbound_number = InboundNumber.query.filter(
InboundNumber.number == number,
InboundNumber.active
).first()
if not inbound_number:
return None
return Service.query.filter(
Service.id == inbound_number.service_id
).first()
def dao_fetch_service_by_id_with_api_keys(service_id, only_active=False):
query = Service.query.filter_by(
id=service_id
).options(
joinedload('api_keys')
)
if only_active:
query = query.filter(Service.active)
return query.one()
def dao_fetch_all_services_by_user(user_id, only_active=False):
query = Service.query.filter(
Service.users.any(id=user_id)
).order_by(
asc(Service.created_at)
).options(
joinedload('users')
)
if only_active:
query = query.filter(Service.active)
return query.all()
def dao_fetch_all_services_created_by_user(user_id):
query = Service.query.filter_by(
created_by_id=user_id
).order_by(
asc(Service.created_at)
)
return query.all()
@autocommit
@version_class(
VersionOptions(ApiKey, must_write_history=False),
VersionOptions(Service),
VersionOptions(Template, history_class=TemplateHistory, must_write_history=False),
)
def dao_archive_service(service_id):
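    """
    Deactivate the service, rewrite its name and email_from via
    get_archived_db_column_value so the originals can be reused, archive its
    templates and expire its API keys.
    """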
# have to eager load templates and api keys so that we don't flush when we loop through them
# to ensure that db.session still contains the models when it comes to creating history objects
service = Service.query.options(
joinedload('templates'),
joinedload('templates.template_redacted'),
joinedload('api_keys'),
).filter(Service.id == service_id).one()
service.active = False
service.name = get_archived_db_column_value(service.name)
service.email_from = get_archived_db_column_value(service.email_from)
for template in service.templates:
if not template.archived:
template.archived = True
for api_key in service.api_keys:
if not api_key.expiry_date:
api_key.expiry_date = datetime.utcnow()
def dao_fetch_service_by_id_and_user(service_id, user_id):
return Service.query.filter(
Service.users.any(id=user_id),
Service.id == service_id
).options(
joinedload('users')
).one()
@autocommit
@version_class(Service)
def dao_create_service(
service,
user,
service_id=None,
service_permissions=None,
):
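    """
    Create the service along with its default permissions, default SMS sender
    and the creating user's permissions. If the user's email domain maps to a
    known organisation, the service inherits that organisation's type,
    branding and crown status.
    """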
if not user:
raise ValueError("Can't create a service without a user")
if service_permissions is None:
service_permissions = DEFAULT_SERVICE_PERMISSIONS
organisation = dao_get_organisation_by_email_address(user.email_address)
from app.dao.permissions_dao import permission_dao
service.users.append(user)
permission_dao.add_default_service_permissions_for_user(user, service)
service.id = service_id or uuid.uuid4() # must be set now so version history model can use same id
service.active = True
service.research_mode = False
for permission in service_permissions:
service_permission = ServicePermission(service_id=service.id, permission=permission)
service.permissions.append(service_permission)
# do we just add the default - or will we get a value from FE?
insert_service_sms_sender(service, current_app.config['FROM_NUMBER'])
    if organisation:
        service.organisation_id = organisation.id
        service.organisation_type = organisation.organisation_type
        service.crown = organisation.crown
        if organisation.email_branding:
            service.email_branding = organisation.email_branding
        if organisation.letter_branding:
            service.letter_branding = organisation.letter_branding
service.count_as_live = not user.platform_admin
db.session.add(service)
@autocommit
@version_class(Service)
def dao_update_service(service):
db.session.add(service)
def dao_add_user_to_service(service, user, permissions=None, folder_permissions=None):
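    """
    Add the user to the service with the given permissions and template folder
    access, committing on success and rolling back on any failure.
    """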
permissions = permissions or []
folder_permissions = folder_permissions or []
try:
from app.dao.permissions_dao import permission_dao
service.users.append(user)
permission_dao.set_user_service_permission(user, service, permissions, _commit=False)
db.session.add(service)
service_user = dao_get_service_user(user.id, service.id)
valid_template_folders = dao_get_valid_template_folders_by_id(folder_permissions)
service_user.folders = valid_template_folders
db.session.add(service_user)
except Exception as e:
db.session.rollback()
raise e
else:
db.session.commit()
def dao_remove_user_from_service(service, user):
try:
from app.dao.permissions_dao import permission_dao
permission_dao.remove_user_service_permissions(user, service)
service_user = dao_get_service_user(user.id, service.id)
db.session.delete(service_user)
except Exception as e:
db.session.rollback()
raise e
else:
db.session.commit()
def delete_service_and_all_associated_db_objects(service):
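    """
    Hard-delete the service and every row that references it, children first
    (template metadata, senders, permissions, notifications, jobs, API keys,
    billing, the users' verify codes) so foreign key constraints are
    satisfied; finally delete the service's users themselves.
    """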
def _delete_commit(query):
query.delete(synchronize_session=False)
db.session.commit()
subq = db.session.query(Template.id).filter_by(service=service).subquery()
_delete_commit(TemplateRedacted.query.filter(TemplateRedacted.template_id.in_(subq)))
_delete_commit(ServiceSmsSender.query.filter_by(service=service))
_delete_commit(ServiceEmailReplyTo.query.filter_by(service=service))
_delete_commit(ServiceLetterContact.query.filter_by(service=service))
_delete_commit(ServiceContactList.query.filter_by(service=service))
_delete_commit(InvitedUser.query.filter_by(service=service))
_delete_commit(Permission.query.filter_by(service=service))
_delete_commit(NotificationHistory.query.filter_by(service=service))
_delete_commit(Notification.query.filter_by(service=service))
_delete_commit(Job.query.filter_by(service=service))
_delete_commit(Template.query.filter_by(service=service))
_delete_commit(TemplateHistory.query.filter_by(service_id=service.id))
_delete_commit(ServicePermission.query.filter_by(service_id=service.id))
_delete_commit(ApiKey.query.filter_by(service=service))
_delete_commit(ApiKey.get_history_model().query.filter_by(service_id=service.id))
_delete_commit(AnnualBilling.query.filter_by(service_id=service.id))
verify_codes = VerifyCode.query.join(User).filter(User.id.in_([x.id for x in service.users]))
list(map(db.session.delete, verify_codes))
db.session.commit()
users = [x for x in service.users]
for user in users:
user.organisations = []
service.users.remove(user)
_delete_commit(Service.get_history_model().query.filter_by(id=service.id))
db.session.delete(service)
db.session.commit()
for user in users:
db.session.delete(user)
db.session.commit()
def dao_fetch_todays_stats_for_service(service_id):
    # Filter on a created_at range rather than date(created_at) so Postgres can
    # use the index on notifications (service_id, created_at): the measured
    # query plan changed from a parallel seq scan (~92s) to a bitmap index scan
    # (~21s). Still slow, but much improved.
    # https://www.pivotaltracker.com/story/show/178263480
today = date.today()
start_date = get_london_midnight_in_utc(today)
return db.session.query(
Notification.notification_type,
Notification.status,
func.count(Notification.id).label('count')
).filter(
Notification.service_id == service_id,
Notification.key_type != KEY_TYPE_TEST,
Notification.created_at >= start_date
).group_by(
Notification.notification_type,
Notification.status,
).all()
def dao_fetch_todays_stats_for_all_services(include_from_test_key=True, only_active=True):
    # Filter created_at against an explicit [start, end) range instead of
    # date(created_at) so the created_at index can be used: the measured query
    # plan changed from a seq scan (~5.8s) to a bitmap index scan (~2.4s).
    # The range is built from London midnight, so "today" is local (BST-aware).
today = date.today()
start_date = get_london_midnight_in_utc(today)
end_date = get_london_midnight_in_utc(today + timedelta(days=1))
subquery = db.session.query(
Notification.notification_type,
Notification.status,
Notification.service_id,
func.count(Notification.id).label('count')
).filter(
Notification.created_at >= start_date,
Notification.created_at < end_date
).group_by(
Notification.notification_type,
Notification.status,
Notification.service_id
)
if not include_from_test_key:
subquery = subquery.filter(Notification.key_type != KEY_TYPE_TEST)
subquery = subquery.subquery()
query = db.session.query(
Service.id.label('service_id'),
Service.name,
Service.restricted,
Service.research_mode,
Service.active,
Service.created_at,
subquery.c.notification_type,
subquery.c.status,
subquery.c.count
).outerjoin(
subquery,
subquery.c.service_id == Service.id
).order_by(Service.id)
if only_active:
query = query.filter(Service.active)
return query.all()
@autocommit
@version_class(
VersionOptions(ApiKey, must_write_history=False),
VersionOptions(Service),
)
def dao_suspend_service(service_id):
# have to eager load api keys so that we don't flush when we loop through them
# to ensure that db.session still contains the models when it comes to creating history objects
service = Service.query.options(
joinedload('api_keys'),
).filter(Service.id == service_id).one()
for api_key in service.api_keys:
if not api_key.expiry_date:
api_key.expiry_date = datetime.utcnow()
service.active = False
@autocommit
@version_class(Service)
def dao_resume_service(service_id):
service = Service.query.get(service_id)
service.active = True
def dao_fetch_active_users_for_service(service_id):
query = User.query.filter(
User.services.any(id=service_id),
User.state == 'active'
)
return query.all()
def dao_find_services_sending_to_tv_numbers(start_date, end_date, threshold=500):
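    """
    Return (service_id, notification_count) for real, live services that sent
    more than `threshold` SMS to numbers in Ofcom's TV drama range
    (07700 900000 to 900999) between start_date and end_date. normalised_to is
    stored with a 44 prefix, so characters 3-9 equalling '7700900' marks a
    drama-range number.
    """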
return db.session.query(
Notification.service_id.label('service_id'),
func.count(Notification.id).label('notification_count')
).filter(
Notification.service_id == Service.id,
Notification.created_at >= start_date,
Notification.created_at <= end_date,
Notification.key_type != KEY_TYPE_TEST,
Notification.notification_type == SMS_TYPE,
func.substr(Notification.normalised_to, 3, 7) == '7700900',
Service.restricted == False, # noqa
Service.research_mode == False, # noqa
Service.active == True, # noqa
).group_by(
Notification.service_id,
).having(
func.count(Notification.id) > threshold
).all()
def dao_find_services_with_high_failure_rates(start_date, end_date, threshold=10000):
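    """
    Find live services that sent at least `threshold` SMS in the window and
    whose permanent-failure rate was 25% or higher. The subquery counts each
    service's total SMS; the outer query counts its permanent failures and
    divides the two.
    """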
subquery = db.session.query(
func.count(Notification.id).label('total_count'),
Notification.service_id.label('service_id')
).filter(
Notification.service_id == Service.id,
Notification.created_at >= start_date,
Notification.created_at <= end_date,
Notification.key_type != KEY_TYPE_TEST,
Notification.notification_type == SMS_TYPE,
Service.restricted == False, # noqa
Service.research_mode == False, # noqa
Service.active == True, # noqa
).group_by(
Notification.service_id,
).having(
func.count(Notification.id) >= threshold
)
subquery = subquery.subquery()
query = db.session.query(
Notification.service_id.label('service_id'),
func.count(Notification.id).label('permanent_failure_count'),
subquery.c.total_count.label('total_count'),
(cast(func.count(Notification.id), Float) / cast(subquery.c.total_count, Float)).label('permanent_failure_rate')
).join(
subquery,
subquery.c.service_id == Notification.service_id
).filter(
Notification.service_id == Service.id,
Notification.created_at >= start_date,
Notification.created_at <= end_date,
Notification.key_type != KEY_TYPE_TEST,
Notification.notification_type == SMS_TYPE,
Notification.status == NOTIFICATION_PERMANENT_FAILURE,
Service.restricted == False, # noqa
Service.research_mode == False, # noqa
Service.active == True, # noqa
).group_by(
Notification.service_id,
subquery.c.total_count
).having(
cast(func.count(Notification.id), Float) / cast(subquery.c.total_count, Float) >= 0.25
)
return query.all()
def get_live_services_with_organisation():
query = db.session.query(
Service.id.label("service_id"),
Service.name.label("service_name"),
Organisation.id.label("organisation_id"),
Organisation.name.label("organisation_name")
).outerjoin(
Service.organisation
).filter(
Service.count_as_live.is_(True),
Service.active.is_(True),
Service.restricted.is_(False)
).order_by(
Organisation.name,
Service.name
)
return query.all()