mirror of https://github.com/GSA/notifications-api.git
merge from main and code review feedback
@@ -264,6 +264,9 @@ def file_exists(file_location):
 
 
 def get_job_location(service_id, job_id):
+    current_app.logger.info(
+        f"#s3-partitioning NEW JOB_LOCATION: {NEW_FILE_LOCATION_STRUCTURE.format(service_id, job_id)}"
+    )
     return (
         current_app.config["CSV_UPLOAD_BUCKET"]["bucket"],
         NEW_FILE_LOCATION_STRUCTURE.format(service_id, job_id),
@@ -279,6 +282,9 @@ def get_old_job_location(service_id, job_id):
     but it will take a few days where we have to support both formats.
     Remove this when everything works with the NEW_FILE_LOCATION_STRUCTURE.
     """
+    current_app.logger.info(
+        f"#s3-partitioning OLD JOB LOCATION: {FILE_LOCATION_STRUCTURE.format(service_id, job_id)}"
+    )
     return (
         current_app.config["CSV_UPLOAD_BUCKET"]["bucket"],
         FILE_LOCATION_STRUCTURE.format(service_id, job_id),
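The docstring above says both key layouts have to keep working for a few days while jobs written under the old structure age out. Purely as a sketch of what a caller could do during that window (this is not code from the commit; the helper name, the module path in the import, and the ClientError handling are assumptions), a read could try the new location first and fall back to the old one:

# Hypothetical transition helper, not part of this commit: try the new partitioned
# location first, fall back to the old flat one while both formats coexist.
from botocore.exceptions import ClientError

from app.aws.s3 import get_job_location, get_old_job_location, get_s3_object  # module path assumed


def get_job_csv_with_fallback(service_id, job_id):
    try:
        obj = get_s3_object(*get_job_location(service_id, job_id))
        return obj.get()["Body"].read().decode("utf-8")
    except ClientError as e:
        # Fall back only when the object simply is not there under the new key.
        if e.response["Error"]["Code"] not in ("NoSuchKey", "404"):
            raise
        obj = get_s3_object(*get_old_job_location(service_id, job_id))
        return obj.get()["Body"].read().decode("utf-8")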
@@ -470,6 +476,9 @@ def get_personalisation_from_s3(service_id, job_id, job_row_number):
 
 
 def get_job_metadata_from_s3(service_id, job_id):
+    current_app.logger.info(
+        f"#s3-partitioning CALLING GET_JOB_METADATA with {service_id}, {job_id}"
+    )
     obj = get_s3_object(*get_job_location(service_id, job_id))
     return obj.get()["Metadata"]
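get_job_metadata_from_s3 leans on boto3 returning the user-defined object metadata as the "Metadata" dict of Object.get(). A standalone sketch of that round trip, with a placeholder bucket, key, body, and metadata (none of these values come from the repo):

# Standalone boto3 sketch of the metadata round trip used above; bucket, key,
# body, and metadata values are placeholders.
import boto3

s3 = boto3.resource("s3")
obj = s3.Object("example-csv-upload-bucket", "example-service-id/example-job-id.csv")

# User-defined metadata is stored alongside the object as x-amz-meta-* headers.
obj.put(
    Body=b"phone number\n+15555550100\n",
    Metadata={"original_file_name": "contacts.csv"},
)

# Object.get() hands the same metadata back as a plain dict, which is what
# get_job_metadata_from_s3 returns to its callers.
print(obj.get()["Metadata"])  # {'original_file_name': 'contacts.csv'}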
@@ -189,51 +189,6 @@ def setup_commands(application):
     application.cli.add_command(command_group)
 
 
-# TODO maintainability what is the purpose of this command? Who would use it and why?
-# COMMENTING OUT UNTIL WE DETERMINE IF WE NEED IT OR NOT
-# @notify_command(name="rebuild-ft-billing-for-day")
-# @click.option("-s", "--service_id", required=False, type=click.UUID)
-# @click.option(
-#     "-d",
-#     "--day",
-#     help="The date to recalculate, as YYYY-MM-DD",
-#     required=True,
-#     type=click_dt(format="%Y-%m-%d"),
-# )
-# def rebuild_ft_billing_for_day(service_id, day):
-
-#     """
-#     Rebuild the data in ft_billing for the given service_id and date
-#     """
-
-#     def rebuild_ft_data(process_day, service):
-#         deleted_rows = delete_billing_data_for_service_for_day(process_day, service)
-#         current_app.logger.info(
-#             f"deleted {deleted_rows} existing billing rows for {service} on {process_day}"
-#         )
-#         transit_data = fetch_billing_data_for_day(
-#             process_day=process_day, service_id=service
-#         )
-#         # transit_data = every row that should exist
-#         for data in transit_data:
-#             # upsert existing rows
-#             update_fact_billing(data, process_day)
-#         current_app.logger.info(
-#             f"added/updated {len(transit_data)} billing rows for {service} on {process_day}"
-#         )
-
-#     if service_id:
-#         # confirm the service exists
-#         dao_fetch_service_by_id(service_id)
-#         rebuild_ft_data(day, service_id)
-#     else:
-#         services = get_service_ids_that_need_billing_populated(
-#             get_midnight_in_utc(day), get_midnight_in_utc(day + timedelta(days=1))
-#         )
-#         for row in services:
-#             rebuild_ft_data(day, row.service_id)
 
 
 @notify_command(name="bulk-invite-user-to-service")
 @click.option(
     "-f",
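The commented-out rebuild-ft-billing-for-day command is deleted here rather than revived. For context, the commands in this module follow one pattern: a Click group that setup_commands() attaches to the Flask CLI. The sketch below is a stripped-down stand-in, not repo code; it uses plain click decorators where the real module uses its notify_command wrapper, and "example-command" is an illustrative name.

# Minimal sketch of the CLI wiring these commands use; "example-command" and its
# body are illustrative, and notify_command in the real module wraps this pattern.
import click
from flask import Flask

command_group = click.Group(name="command", help="Additional Notify commands")


@command_group.command(name="example-command")
@click.option("-d", "--day", required=True, help="The date to process, as YYYY-MM-DD")
def example_command(day):
    click.echo(f"processing {day}")


def setup_commands(application: Flask):
    application.cli.add_command(command_group)

With that wiring, the command would be invoked as "flask command example-command -d 2024-01-01".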
@@ -467,32 +422,6 @@ def associate_services_to_organizations():
     current_app.logger.info("finished associating services to organizations")
 
 
-# TODO maintainability what is the purpose of this command? Who would use it and why?
-# COMMENTING OUT UNTIL WE DETERMINE IF WE NEED IT OR NOT
-# @notify_command(name="populate-service-volume-intentions")
-# @click.option(
-#     "-f",
-#     "--file_name",
-#     required=True,
-#     help="Pipe delimited file containing service_id, SMS, email",
-# )
-# def populate_service_volume_intentions(file_name):
-#     # [0] service_id
-#     # [1] SMS:: volume intentions for service
-#     # [2] Email:: volume intentions for service
-
-
-#     with open(file_name, "r") as f:
-#         for line in itertools.islice(f, 1, None):
-#             columns = line.split(",")
-#             current_app.logger.info(columns)
-#             service = dao_fetch_service_by_id(columns[0])
-#             service.volume_sms = columns[1]
-#             service.volume_email = columns[2]
-#             dao_update_service(service)
-#     current_app.logger.info("populate-service-volume-intentions complete")
 
 
 @notify_command(name="populate-go-live")
 @click.option(
     "-f", "--file_name", required=True, help="CSV file containing live service data"
@@ -175,6 +175,7 @@ def create_job(service_id):
     original_file_name = data.get("original_file_name")
     data.update({"service": service_id})
     try:
+        current_app.logger.info(f"#s3-partitioning DATA IN CREATE_JOB: {data}")
         data.update(**get_job_metadata_from_s3(service_id, data["id"]))
     except KeyError:
         raise InvalidRequest(
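The new log line above sits right before the payload is merged with the job metadata that get_job_metadata_from_s3 (earlier in this diff) pulls from S3. A tiny sketch of just that merge, with placeholder dicts, to show what data.update(**metadata) does and where the KeyError path comes in:

# Sketch of the merge performed in create_job; both dicts are placeholders.
data = {"id": "example-job-id", "service": "example-service-id"}
metadata = {"original_file_name": "contacts.csv", "template_id": "example-template-id"}

data.update(**metadata)  # same effect as data.update(metadata)
print(data["original_file_name"])  # contacts.csv

# A payload with no "id" key raises KeyError at data["id"] before the S3 call,
# which the view above converts into an InvalidRequest response.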
@@ -453,16 +453,6 @@ def get_all_notifications_for_service(service_id):
     data = notifications_filter_schema.load(MultiDict(request.get_json()))
     current_app.logger.debug(f"use POST, request {request.get_json()} data {data}")
 
-    # if data.get("to"):
-    #     notification_type = (
-    #         data.get("template_type")[0] if data.get("template_type") else None
-    #     )
-    #     return search_for_notification_by_to_field(
-    #         service_id=service_id,
-    #         search_term=data["to"],
-    #         statuses=data.get("status"),
-    #         notification_type=notification_type,
-    #     )
     page = data["page"] if "page" in data else 1
     page_size = (
         data["page_size"]
@@ -583,53 +573,6 @@ def get_notification_for_service(service_id, notification_id):
     )
 
 
-# def search_for_notification_by_to_field(
-#     service_id, search_term, statuses, notification_type
-# ):
-#     results = notifications_dao.dao_get_notifications_by_recipient_or_reference(
-#         service_id=service_id,
-#         search_term=search_term,
-#         statuses=statuses,
-#         notification_type=notification_type,
-#         page=1,
-#         page_size=current_app.config["PAGE_SIZE"],
-#     )
-
-#     # We try and get the next page of results to work out if we need provide a pagination link to the next page
-#     # in our response. Note, this was previously be done by having
-#     # notifications_dao.dao_get_notifications_by_recipient_or_reference use count=False when calling
-#     # Flask-Sqlalchemys `paginate'. But instead we now use this way because it is much more performant for
-#     # services with many results (unlike using Flask SqlAlchemy `paginate` with `count=True`, this approach
-#     # doesn't do an additional query to count all the results of which there could be millions but instead only
-#     # asks for a single extra page of results).
-#     next_page_of_pagination = notifications_dao.dao_get_notifications_by_recipient_or_reference(
-#         service_id=service_id,
-#         search_term=search_term,
-#         statuses=statuses,
-#         notification_type=notification_type,
-#         page=2,
-#         page_size=current_app.config["PAGE_SIZE"],
-#         error_out=False,  # False so that if there are no results, it doesn't end in aborting with a 404
-#     )
-
-#     return (
-#         jsonify(
-#             notifications=notification_with_template_schema.dump(
-#                 results.items, many=True
-#             ),
-#             links=get_prev_next_pagination_links(
-#                 1,
-#                 len(next_page_of_pagination.items),
-#                 ".get_all_notifications_for_service",
-#                 statuses=statuses,
-#                 notification_type=notification_type,
-#                 service_id=service_id,
-#             ),
-#         ),
-#         200,
-#     )
 
 
 @service_blueprint.route("/<uuid:service_id>/notifications/monthly", methods=["GET"])
 def get_monthly_notification_stats(service_id):
     # check service_id validity
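The deleted helper's own comments describe the interesting part: rather than letting pagination run a COUNT(*) over what could be millions of notifications, it asks for one extra page (error_out=False so an empty page 2 doesn't abort with a 404) and uses whether that page has items to decide if a "next" link is needed. A self-contained SQLAlchemy sketch of the same idea; the model, engine URL, filter, and page size are placeholders, not repo code.

# Self-contained sketch of the "peek at the next page instead of counting" pattern
# described in the deleted comments; model, engine URL, and filter are placeholders.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Notification(Base):
    __tablename__ = "notifications"
    id = Column(Integer, primary_key=True)
    to = Column(String)


engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)


def first_page_with_next_hint(session, search_term, page_size=50):
    query = session.query(Notification).filter(Notification.to == search_term)
    items = query.order_by(Notification.id).limit(page_size).all()
    # One cheap LIMIT 1 probe past the first page replaces a full COUNT(*).
    has_next = query.order_by(Notification.id).offset(page_size).first() is not None
    return items, has_next


with Session(engine) as session:
    page_items, has_next = first_page_with_next_hint(session, "+15555550100")
    print(len(page_items), has_next)  # 0 False on an empty table

The deleted code did the same thing through dao_get_notifications_by_recipient_or_reference with page=2, then sized the pagination links off len(next_page_of_pagination.items).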
@@ -86,7 +86,7 @@ def _create_service_invite(invited_user, invite_link_host):
     redis_store.set(
         f"email-personalisation-{saved_notification.id}",
         json.dumps(personalisation),
-        ex=1800,
+        ex=2 * 24 * 60 * 60,
     )
     send_notification_to_queue(saved_notification, queue=QueueNames.NOTIFY)
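The only change in this hunk is the cache lifetime of the stored invite personalisation: ex= moves from 1800 seconds (30 minutes) to 2 * 24 * 60 * 60 = 172,800 seconds, i.e. two days. A small redis-py sketch of those expiry semantics, assuming a locally running Redis and using a placeholder key and payload:

# redis-py sketch of the ex= expiry used above; key and payload are placeholders
# and a locally running Redis is assumed.
import json
from datetime import timedelta

import redis

r = redis.Redis()
personalisation = {"name": "example", "url": "https://example.gov/invite/token"}

r.set("email-personalisation-example-id", json.dumps(personalisation), ex=2 * 24 * 60 * 60)
print(r.ttl("email-personalisation-example-id"))  # ~172800 seconds remaining

# redis-py also accepts a timedelta for ex=, which reads a little more clearly:
r.set("email-personalisation-example-id", json.dumps(personalisation), ex=timedelta(days=2))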
@@ -599,7 +599,6 @@ def fetch_user_by_email():
        fetched_user = get_user_by_email(email["email"])
        debug_not_production(hilite(f"fetched user is {fetched_user}"))
        result = fetched_user.serialize()
        debug_not_production(hilite(f"result is serialized to {result}"))
        return jsonify(data=result)
    except Exception as e:
        debug_not_production(hilite(f"Failed with {e}!!"))
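The hunk above trims the debug_not_production(hilite(...)) calls around the user lookup; neither helper's implementation appears in this diff. Purely as a guess at the intent suggested by the names, an environment-gated debug logger could look something like this (the environment variable name and log level are assumptions):

# Hypothetical sketch of helpers in the spirit of debug_not_production()/hilite();
# the real implementations are not shown in this diff, and NOTIFY_ENVIRONMENT is assumed.
import logging
import os

logger = logging.getLogger(__name__)


def hilite(message):
    # ANSI color so the message stands out in local log output.
    return f"\033[93m{message}\033[0m"


def debug_not_production(message):
    # Emit noisy diagnostics only outside production.
    if os.environ.get("NOTIFY_ENVIRONMENT", "development") != "production":
        logger.warning(message)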