set job status to error in the check_job_status scheduled task

the process_incomplete_jobs task runs through all incomplete jobs in
a loop, so it might not get a chance to update the processing_started
time of the last job before check_job_status runs again (every minute).
So before we even trigger the process_incomplete_jobs task, let's set
the status of the jobs to error, so that we don't pick them up for
re-processing a second time.
Leo Hemsted
2018-03-09 16:34:47 +00:00
parent f0ca3d40de
commit 64bb94af9e
4 changed files with 46 additions and 3 deletions
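For illustration of the race described in the commit message, here is a minimal, self-contained sketch of the problem and the fix. Job, find_stuck_jobs and the queue list are simplified stand-ins, not the real DAO or Celery code:

from datetime import datetime, timedelta

# Stand-in for app.models.Job (illustrative only).
class Job:
    def __init__(self, job_id, processing_started):
        self.id = job_id
        self.job_status = 'in progress'
        self.processing_started = processing_started

def find_stuck_jobs(jobs, now):
    # The same 30-35 minute window the scheduled task queries for.
    return [
        j for j in jobs
        if j.job_status == 'in progress'
        and now - timedelta(minutes=35) < j.processing_started < now - timedelta(minutes=30)
    ]

def check_job_status(jobs, queue, now):
    job_ids = []
    for job in find_stuck_jobs(jobs, now):
        job.job_status = 'error'  # flag before dispatching, so the next run skips it
        job_ids.append(str(job.id))
    if job_ids:
        queue.append(('process-incomplete-jobs', job_ids))

now = datetime.utcnow()
jobs = [Job('a', now - timedelta(minutes=32))]
queue = []
check_job_status(jobs, queue, now)                         # queues job 'a'
check_job_status(jobs, queue, now + timedelta(minutes=1))  # 'a' is flagged, so it's skipped
assert len(queue) == 1  # without the flag, 'a' would have been queued twice

(Assuming dao_update_job commits per call, even a partially completed loop shrinks the set the next minute's task can re-select.)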

@@ -45,13 +45,15 @@ from app.dao.provider_details_dao import (
     dao_toggle_sms_provider
 )
 from app.dao.users_dao import delete_codes_older_created_more_than_a_day_ago
+from app.dao.jobs_dao import dao_update_job
 from app.models import (
     Job,
     Notification,
     NOTIFICATION_SENDING,
     LETTER_TYPE,
     JOB_STATUS_IN_PROGRESS,
-    JOB_STATUS_READY_TO_SEND
+    JOB_STATUS_READY_TO_SEND,
+    JOB_STATUS_ERROR
 )
 from app.notifications.process_notifications import send_notification_to_queue
 from app.celery.tasks import (
@@ -442,7 +444,14 @@ def check_job_status():
         and_(thirty_five_minutes_ago < Job.processing_started, Job.processing_started < thirty_minutes_ago)
     ).order_by(Job.processing_started).all()
 
-    job_ids = [str(x.id) for x in jobs_not_complete_after_30_minutes]
+    # temporarily mark them as ERROR so that they don't get picked up by future check_job_status tasks
+    # if they haven't been re-processed in time.
+    job_ids = []
+    for job in jobs_not_complete_after_30_minutes:
+        job.job_status = JOB_STATUS_ERROR
+        dao_update_job(job)
+        job_ids.append(str(job.id))
+
     if job_ids:
         notify_celery.send_task(
             name=TaskNames.PROCESS_INCOMPLETE_JOBS,
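For context, the dispatched task fans back out to per-job work. A hedged sketch of its shape, based on the commit message's note that it runs through all incomplete jobs in a loop (the function bodies are stand-ins, not the real Celery tasks):

def process_incomplete_jobs(job_ids):
    # one job at a time, which is why the last job's processing_started
    # might not be updated before the next check_job_status run
    for job_id in job_ids:
        process_incomplete_job(job_id)

def process_incomplete_job(job_id):
    print('re-processing job', job_id)  # stand-in for the real re-send logic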

@@ -560,6 +560,7 @@ def process_incomplete_job(job_id):
     job = dao_get_job_by_id(job_id)
 
     # reset the processing start time so that the check_job_status scheduled task doesn't pick this job up again
     job.job_status = JOB_STATUS_PENDING
+    job.processing_started = datetime.utcnow()
     dao_update_job(job)
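A quick worked check of why resetting processing_started keeps the job out of check_job_status's selection window (illustrative; the 30-35 minute bounds come from the hunk above):

from datetime import datetime, timedelta

now = datetime.utcnow()
processing_started = now  # just reset by process_incomplete_job

# check_job_status only selects jobs whose processing_started falls
# 30-35 minutes in the past, so a freshly reset job stays out of the
# window for at least another 30 minutes.
assert not (now - timedelta(minutes=35) < processing_started < now - timedelta(minutes=30))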