from datetime import datetime

from flask import current_app
from sqlalchemy import func, desc, asc, cast, Date as sql_date

from app import db
from app.dao import days_ago
from app.models import (Job,
                        Notification,
                        NotificationHistory,
                        Template,
                        JOB_STATUS_SCHEDULED,
                        JOB_STATUS_PENDING,
                        LETTER_TYPE)
from app.statsd_decorators import statsd


@statsd(namespace="dao")
def dao_get_notification_outcomes_for_job(service_id, job_id):
    query = db.session.query(
        func.count(NotificationHistory.status).label('count'),
        NotificationHistory.status.label('status')
    )

    return query \
        .filter(NotificationHistory.service_id == service_id) \
        .filter(NotificationHistory.job_id == job_id) \
        .group_by(NotificationHistory.status) \
        .order_by(asc(NotificationHistory.status)) \
        .all()


@statsd(namespace="dao")
def all_notifications_are_created_for_job(job_id):
    query = db.session.query(func.count(Notification.id), Job.id) \
        .join(Job) \
        .filter(Job.id == job_id) \
        .group_by(Job.id) \
        .having(func.count(Notification.id) == Job.notification_count).all()

    return query


@statsd(namespace="dao")
def dao_get_all_notifications_for_job(job_id):
    return db.session.query(Notification).filter(Notification.job_id == job_id).order_by(Notification.created_at).all()


def dao_get_job_by_service_id_and_job_id(service_id, job_id):
    return Job.query.filter_by(service_id=service_id, id=job_id).one()


def dao_get_jobs_by_service_id(service_id, limit_days=None, page=1, page_size=50, statuses=None):
    query_filter = [
        Job.service_id == service_id,
        Job.original_file_name != current_app.config['TEST_MESSAGE_FILENAME']
    ]
    if limit_days is not None:
        query_filter.append(cast(Job.created_at, sql_date) >= days_ago(limit_days))
    if statuses is not None and statuses != ['']:
        query_filter.append(
            Job.job_status.in_(statuses)
        )
    return Job.query \
        .filter(*query_filter) \
        .order_by(Job.processing_started.desc(), Job.created_at.desc()) \
        .paginate(page=page, per_page=page_size)
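
# Illustrative usage (an assumption, not part of this module): `paginate` returns a
# Flask-SQLAlchemy Pagination object, so a caller might do something like
#
#     pagination = dao_get_jobs_by_service_id(service_id, limit_days=7)
#     jobs, total = pagination.items, pagination.total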


def dao_get_job_by_id(job_id):
    return Job.query.filter_by(id=job_id).one()


def dao_set_scheduled_jobs_to_pending():
    """
    Sets all past scheduled jobs to pending, and then returns them for further processing.

    This is used in the run_scheduled_jobs task, so we put a FOR UPDATE lock on the job
    table for the duration of the transaction, so that if the task is run more than once
    concurrently, one task will block the other's select from completing until it commits.
    """
    jobs = Job.query \
        .filter(
            Job.job_status == JOB_STATUS_SCHEDULED,
            Job.scheduled_for < datetime.utcnow()
        ) \
        .order_by(asc(Job.scheduled_for)) \
        .with_for_update() \
        .all()

    for job in jobs:
        job.job_status = JOB_STATUS_PENDING

    db.session.add_all(jobs)
    db.session.commit()

    return jobs
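
# Illustrative sketch (an assumption, not part of this module): the run_scheduled_jobs
# task mentioned in the docstring above would typically re-queue each job returned here,
# for example
#
#     for job in dao_set_scheduled_jobs_to_pending():
#         process_job.apply_async([str(job.id)], queue='job-tasks')
#
# The task name and queue are illustrative only; they are not defined by this module.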


def dao_get_future_scheduled_job_by_id_and_service_id(job_id, service_id):
    return Job.query \
        .filter(
            Job.service_id == service_id,
            Job.id == job_id,
            Job.job_status == JOB_STATUS_SCHEDULED,
            Job.scheduled_for > datetime.utcnow()
        ) \
        .one()


def dao_create_job(job):
    db.session.add(job)
    db.session.commit()


def dao_update_job(job):
    db.session.add(job)
    db.session.commit()


def dao_update_job_status(job_id, status):
    db.session.query(Job).filter_by(id=job_id).update({'job_status': status})
    db.session.commit()


def dao_get_jobs_older_than_limited_by(older_than=7, limit_days=2):
    """
    Returns jobs whose uploaded CSV files are due for deletion, limited to a window of
    `limit_days` days ending `older_than` days ago.

    Previously this query was unbounded, so it fetched every job older than seven days
    (in excess of 75,000), which meant the nightly delete task took a long time, used a
    lot of memory, and repeated the same work every day. Limiting the query to a small
    window keeps the number of eligible jobs in any run down, while still leaving some
    leeway in the event that the task fails one night.

    The task runs early in the morning on a given day. For example, on the 31st with the
    defaults, jobs from the 24th to the 30th are too recent and are ignored, jobs from
    the 22nd and 23rd have their files deleted, and jobs from the 21st or earlier are
    ignored.
    """
    return Job.query.filter(
        cast(Job.created_at, sql_date) < days_ago(older_than),
        cast(Job.created_at, sql_date) >= days_ago(older_than + limit_days)
    ).order_by(desc(Job.created_at)).all()
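
# Illustrative sketch (an assumption, not part of this module): a nightly cleanup task
# could use the helper above to fetch only the jobs whose CSV files are due for removal,
# for example
#
#     for job in dao_get_jobs_older_than_limited_by():
#         remove_csv_for_job(job)  # hypothetical clean-up helper, named for illustration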


def dao_get_all_letter_jobs():
    return db.session.query(Job).join(Job.template).filter(
        Template.template_type == LETTER_TYPE
    ).order_by(desc(Job.created_at)).all()