remove datetime.utcnow()

Kenneth Kehl
2024-05-23 13:59:51 -07:00
parent 752a13fbd2
commit 905df17f65
83 changed files with 591 additions and 570 deletions
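Summary: every call to the deprecated `datetime.utcnow()` is replaced with a `utc_now()` helper imported from `app.utils`, so the naive-UTC convention lives in one place. The helper's definition is not part of the hunks shown below; a minimal sketch of what it presumably looks like, given that call sites such as `utc_now() - message_time` compare against naive datetimes:

```python
# Hypothetical sketch of the app.utils helper this commit imports; its actual
# definition is not shown in this diff. datetime.utcnow() is deprecated since
# Python 3.12 in favor of the timezone-aware datetime.now(timezone.utc);
# stripping tzinfo here keeps the naive-datetime behavior the call sites expect.
from datetime import datetime, timezone


def utc_now():
    return datetime.now(timezone.utc).replace(tzinfo=None)
```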

View File

@@ -27,7 +27,7 @@ from app.dao.service_data_retention_dao import (
 )
 from app.enums import NotificationType
 from app.models import FactProcessingTime
-from app.utils import get_midnight_in_utc
+from app.utils import get_midnight_in_utc, utc_now


 @notify_celery.task(name="remove_sms_email_jobs")
@@ -46,7 +46,7 @@ def _remove_csv_files(job_types):
 @notify_celery.task(name="cleanup-unfinished-jobs")
 def cleanup_unfinished_jobs():
-    now = datetime.utcnow()
+    now = utc_now()
     jobs = dao_get_unfinished_jobs()
     for job in jobs:
         # The query already checks that the processing_finished time is null, so here we are saying
@@ -88,7 +88,7 @@ def _delete_notifications_older_than_retention_by_type(notification_type):
     for f in flexible_data_retention:
         day_to_delete_backwards_from = get_midnight_in_utc(
-            datetime.utcnow()
+            utc_now()
         ).date() - timedelta(days=f.days_of_retention)

         delete_notifications_for_service_and_type.apply_async(
@@ -100,7 +100,7 @@ def _delete_notifications_older_than_retention_by_type(notification_type):
             },
         )

-    seven_days_ago = get_midnight_in_utc(datetime.utcnow()).date() - timedelta(days=7)
+    seven_days_ago = get_midnight_in_utc(utc_now()).date() - timedelta(days=7)
     service_ids_with_data_retention = {x.service_id for x in flexible_data_retention}
@@ -136,14 +136,14 @@ def _delete_notifications_older_than_retention_by_type(notification_type):
 def delete_notifications_for_service_and_type(
     service_id, notification_type, datetime_to_delete_before
 ):
-    start = datetime.utcnow()
+    start = utc_now()
     num_deleted = move_notifications_to_notification_history(
         notification_type,
         service_id,
         datetime_to_delete_before,
     )
     if num_deleted:
-        end = datetime.utcnow()
+        end = utc_now()
         current_app.logger.info(
             f"delete-notifications-for-service-and-type: "
             f"service: {service_id}, "
@@ -158,7 +158,7 @@ def delete_notifications_for_service_and_type(
 def timeout_notifications():
     notifications = ["dummy value so len() > 0"]
-    cutoff_time = datetime.utcnow() - timedelta(
+    cutoff_time = utc_now() - timedelta(
         seconds=current_app.config.get("SENDING_NOTIFICATIONS_TIMEOUT_PERIOD")
     )
@@ -179,11 +179,11 @@ def timeout_notifications():
 @cronitor("delete-inbound-sms")
 def delete_inbound_sms():
     try:
-        start = datetime.utcnow()
+        start = utc_now()
         deleted = delete_inbound_sms_older_than_retention()
         current_app.logger.info(
             "Delete inbound sms job started {} finished {} deleted {} inbound sms notifications".format(
-                start, datetime.utcnow(), deleted
+                start, utc_now(), deleted
             )
         )
     except SQLAlchemyError:
@@ -197,7 +197,7 @@ def save_daily_notification_processing_time(local_date=None):
     # local_date is a string in the format of "YYYY-MM-DD"
     if local_date is None:
         # if a date is not provided, we run against yesterdays data
-        local_date = (datetime.utcnow() - timedelta(days=1)).date()
+        local_date = (utc_now() - timedelta(days=1)).date()
     else:
         local_date = datetime.strptime(local_date, "%Y-%m-%d").date()

View File

@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import timedelta

 import iso8601
 from celery.exceptions import Retry
@@ -22,6 +22,7 @@ from app.dao.service_callback_api_dao import (
 )
 from app.enums import CallbackType, NotificationStatus
 from app.models import Complaint
+from app.utils import utc_now


 @notify_celery.task(
@@ -57,7 +58,7 @@ def process_ses_results(self, response):
         message_time = iso8601.parse_date(ses_message["mail"]["timestamp"]).replace(
             tzinfo=None
         )
-        if datetime.utcnow() - message_time < timedelta(minutes=5):
+        if utc_now() - message_time < timedelta(minutes=5):
             current_app.logger.info(
                 f"Notification not found for reference: {reference}"
                 f"(while attempting update to {notification_status}). "

View File

@@ -1,6 +1,6 @@
 import json
 import os
-from datetime import datetime, timedelta
+from datetime import timedelta

 from flask import current_app
 from sqlalchemy.orm.exc import NoResultFound
@@ -18,6 +18,7 @@ from app.dao.notifications_dao import (
 from app.delivery import send_to_providers
 from app.enums import NotificationStatus
 from app.exceptions import NotificationTechnicalFailureException
+from app.utils import utc_now

 # This is the amount of time to wait after sending an sms message before we check the aws logs and look for delivery
 # receipts
@@ -113,9 +114,7 @@ def deliver_sms(self, notification_id):
         message_id = send_to_providers.send_sms_to_provider(notification)
         # We have to put it in UTC. For other timezones, the delay
         # will be ignored and it will fire immediately (although this probably only affects developer testing)
-        my_eta = datetime.utcnow() + timedelta(
-            seconds=DELIVERY_RECEIPT_DELAY_IN_SECONDS
-        )
+        my_eta = utc_now() + timedelta(seconds=DELIVERY_RECEIPT_DELAY_IN_SECONDS)
         check_sms_delivery_receipt.apply_async(
             [message_id, notification_id, notification.created_at],
             eta=my_eta,

View File

@@ -9,6 +9,7 @@ from app.dao.fact_billing_dao import fetch_billing_data_for_day, update_fact_bil
 from app.dao.fact_notification_status_dao import update_fact_notification_status
 from app.dao.notifications_dao import get_service_ids_with_notifications_on_date
 from app.enums import NotificationType
+from app.utils import utc_now


 @notify_celery.task(name="create-nightly-billing")
@@ -17,7 +18,7 @@ def create_nightly_billing(day_start=None):
     # day_start is a datetime.date() object. e.g.
     # up to 4 days of data counting back from day_start is consolidated
     if day_start is None:
-        day_start = datetime.utcnow().date() - timedelta(days=1)
+        day_start = utc_now().date() - timedelta(days=1)
     else:
         # When calling the task its a string in the format of "YYYY-MM-DD"
         day_start = datetime.strptime(day_start, "%Y-%m-%d").date()
@@ -39,9 +40,9 @@ def create_nightly_billing_for_day(process_day):
         f"create-nightly-billing-for-day task for {process_day}: started"
     )

-    start = datetime.utcnow()
+    start = utc_now()
     transit_data = fetch_billing_data_for_day(process_day=process_day)
-    end = datetime.utcnow()
+    end = utc_now()

     current_app.logger.info(
         f"create-nightly-billing-for-day task for {process_day}: data fetched in {(end - start).seconds} seconds"
@@ -78,7 +79,7 @@ def create_nightly_notification_status():
     mean the aggregated results are temporarily incorrect.
     """
-    yesterday = datetime.utcnow().date() - timedelta(days=1)
+    yesterday = utc_now().date() - timedelta(days=1)

     for notification_type in (NotificationType.SMS, NotificationType.EMAIL):
         days = 4
@@ -107,14 +108,14 @@ def create_nightly_notification_status_for_service_and_day(
 ):
     process_day = datetime.strptime(process_day, "%Y-%m-%d").date()

-    start = datetime.utcnow()
+    start = utc_now()
     update_fact_notification_status(
         process_day=process_day,
         notification_type=notification_type,
         service_id=service_id,
     )
-    end = datetime.utcnow()
+    end = utc_now()

     current_app.logger.info(
         f"create-nightly-notification-status-for-service-and-day task update "
         f"for {service_id}, {notification_type} for {process_day}: "

View File

@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import timedelta

 from flask import current_app
 from sqlalchemy import between
@@ -31,6 +31,7 @@ from app.dao.users_dao import delete_codes_older_created_more_than_a_day_ago
 from app.enums import JobStatus, NotificationType
 from app.models import Job
 from app.notifications.process_notifications import send_notification_to_queue
+from app.utils import utc_now
 from notifications_utils.clients.zendesk.zendesk_client import NotifySupportTicket

 MAX_NOTIFICATION_FAILS = 10000
@@ -52,11 +53,11 @@ def run_scheduled_jobs():
 @notify_celery.task(name="delete-verify-codes")
 def delete_verify_codes():
     try:
-        start = datetime.utcnow()
+        start = utc_now()
         deleted = delete_codes_older_created_more_than_a_day_ago()
         current_app.logger.info(
             "Delete job started {} finished {} deleted {} verify codes".format(
-                start, datetime.utcnow(), deleted
+                start, utc_now(), deleted
             )
         )
     except SQLAlchemyError:
@@ -67,20 +68,20 @@ def delete_verify_codes():
 @notify_celery.task(name="expire-or-delete-invitations")
 def expire_or_delete_invitations():
     try:
-        start = datetime.utcnow()
+        start = utc_now()
         expired_invites = expire_invitations_created_more_than_two_days_ago()
         current_app.logger.info(
-            f"Expire job started {start} finished {datetime.utcnow()} expired {expired_invites} invitations"
+            f"Expire job started {start} finished {utc_now()} expired {expired_invites} invitations"
         )
     except SQLAlchemyError:
         current_app.logger.exception("Failed to expire invitations")
         raise

     try:
-        start = datetime.utcnow()
+        start = utc_now()
         deleted_invites = delete_org_invitations_created_more_than_two_days_ago()
         current_app.logger.info(
-            f"Delete job started {start} finished {datetime.utcnow()} deleted {deleted_invites} invitations"
+            f"Delete job started {start} finished {utc_now()} deleted {deleted_invites} invitations"
         )
     except SQLAlchemyError:
         current_app.logger.exception("Failed to delete invitations")
@@ -101,8 +102,8 @@ def check_job_status():
     update the job_status to 'error'
     process the rows in the csv that are missing (in another task) just do the check here.
     """
-    thirty_minutes_ago = datetime.utcnow() - timedelta(minutes=30)
-    thirty_five_minutes_ago = datetime.utcnow() - timedelta(minutes=35)
+    thirty_minutes_ago = utc_now() - timedelta(minutes=30)
+    thirty_five_minutes_ago = utc_now() - timedelta(minutes=35)

     incomplete_in_progress_jobs = Job.query.filter(
         Job.job_status == JobStatus.IN_PROGRESS,
@@ -179,8 +180,8 @@ def check_for_missing_rows_in_completed_jobs():
     name="check-for-services-with-high-failure-rates-or-sending-to-tv-numbers"
 )
 def check_for_services_with_high_failure_rates_or_sending_to_tv_numbers():
-    start_date = datetime.utcnow() - timedelta(days=1)
-    end_date = datetime.utcnow()
+    start_date = utc_now() - timedelta(days=1)
+    end_date = utc_now()
     message = ""

     services_with_failures = dao_find_services_with_high_failure_rates(

View File

@@ -1,5 +1,4 @@
 import json
-from datetime import datetime

 from flask import current_app
 from requests import HTTPError, RequestException, request
@@ -24,7 +23,7 @@ from app.notifications.process_notifications import persist_notification
 from app.notifications.validators import check_service_over_total_message_limit
 from app.serialised_models import SerialisedService, SerialisedTemplate
 from app.service.utils import service_allowed_to_send_to
-from app.utils import DATETIME_FORMAT
+from app.utils import DATETIME_FORMAT, utc_now
 from app.v2.errors import TotalRequestsError
 from notifications_utils.recipients import RecipientCSV
@@ -32,7 +31,7 @@ from notifications_utils.recipients import RecipientCSV
 @notify_celery.task(name="process-job")
 def process_job(job_id, sender_id=None):
     """Update job status, get csv data from s3, and begin processing csv rows."""
-    start = datetime.utcnow()
+    start = utc_now()
     job = dao_get_job_by_id(job_id)

     current_app.logger.info(
         "Starting process-job task for job id {} with status: {}".format(
@@ -82,7 +81,7 @@
 def job_complete(job, resumed=False, start=None):
     job.job_status = JobStatus.FINISHED
-    finished = datetime.utcnow()
+    finished = utc_now()
     job.processing_finished = finished
     dao_update_job(job)
@@ -157,7 +156,7 @@ def __total_sending_limits_for_job_exceeded(service, job, job_id):
         return False
     except TotalRequestsError:
         job.job_status = "sending limits exceeded"
-        job.processing_finished = datetime.utcnow()
+        job.processing_finished = utc_now()
         dao_update_job(job)
         current_app.logger.error(
             "Job {} size {} error. Total sending limits {} exceeded".format(
@@ -211,7 +210,7 @@ def save_sms(self, service_id, notification_id, encrypted_notification, sender_i
         notification_type=NotificationType.SMS,
         api_key_id=None,
         key_type=KeyType.NORMAL,
-        created_at=datetime.utcnow(),
+        created_at=utc_now(),
         created_by_id=created_by_id,
         job_id=notification.get("job", None),
         job_row_number=notification.get("row_number", None),
@@ -272,7 +271,7 @@ def save_email(
         notification_type=NotificationType.EMAIL,
         api_key_id=None,
         key_type=KeyType.NORMAL,
-        created_at=datetime.utcnow(),
+        created_at=utc_now(),
         job_id=notification.get("job", None),
         job_row_number=notification.get("row_number", None),
         notification_id=notification_id,
@@ -438,7 +437,7 @@ def process_incomplete_jobs(job_ids):
     # reset the processing start time so that the check_job_status scheduled task doesn't pick this job up again
     for job in jobs:
         job.job_status = JobStatus.IN_PROGRESS
-        job.processing_started = datetime.utcnow()
+        job.processing_started = utc_now()
         dao_update_job(job)

     current_app.logger.info("Resuming Job(s) {}".format(job_ids))