Mirror of https://github.com/GSA/notifications-api.git (synced 2026-02-02 17:31:14 -05:00)
Merge pull request #1852 from alphagov/read-job-meta-from-s3
Read job metadata from S3 metadata
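For background on what "S3 metadata" means here: S3 lets you attach user-defined key/value metadata (always strings) to an object at upload time and read it back without downloading the body. Below is a minimal boto3 sketch of that round trip, with an invented bucket, key, and metadata keys purely for illustration; the real values come from current_app.config['CSV_UPLOAD_BUCKET_NAME'] and FILE_LOCATION_STRUCTURE in the diff that follows.

    import boto3

    # Invented names for illustration only; the service derives the real bucket and
    # key from CSV_UPLOAD_BUCKET_NAME and FILE_LOCATION_STRUCTURE.
    bucket_name = 'example-csv-upload-bucket'
    file_location = 'service-1234/job-5678.csv'

    s3 = boto3.resource('s3')
    obj = s3.Object(bucket_name, file_location)

    # Upload the CSV with user-defined metadata; S3 stores every value as a string.
    obj.put(
        Body=b'phone number\n+447700900123\n',
        Metadata={'template_id': 'abc-123', 'valid': 'True'},  # assumed keys, for illustration
    )

    # Reading it back is what get_job_metadata_from_s3 does below: obj.get()['Metadata']
    # returns the user-defined metadata as a plain dict of strings.
    print(obj.get()['Metadata'])  # {'template_id': 'abc-123', 'valid': 'True'}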
@@ -18,17 +18,25 @@ def get_s3_object(bucket_name, file_location):
     return s3.Object(bucket_name, file_location)
 
 
+def get_job_location(service_id, job_id):
+    return (
+        current_app.config['CSV_UPLOAD_BUCKET_NAME'],
+        FILE_LOCATION_STRUCTURE.format(service_id, job_id),
+    )
+
+
 def get_job_from_s3(service_id, job_id):
-    bucket_name = current_app.config['CSV_UPLOAD_BUCKET_NAME']
-    file_location = FILE_LOCATION_STRUCTURE.format(service_id, job_id)
-    obj = get_s3_object(bucket_name, file_location)
+    obj = get_s3_object(*get_job_location(service_id, job_id))
     return obj.get()['Body'].read().decode('utf-8')
 
 
+def get_job_metadata_from_s3(service_id, job_id):
+    obj = get_s3_object(*get_job_location(service_id, job_id))
+    return obj.get()['Metadata']
+
+
 def remove_job_from_s3(service_id, job_id):
-    bucket_name = current_app.config['CSV_UPLOAD_BUCKET_NAME']
-    file_location = FILE_LOCATION_STRUCTURE.format(service_id, job_id)
-    return remove_s3_object(bucket_name, file_location)
+    return remove_s3_object(*get_job_location(service_id, job_id))
 
 
 def get_s3_bucket_objects(bucket_name, subfolder='', older_than=7, limit_days=2):
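A side note, not part of the diff: these helpers read current_app.config, so they can only run inside a Flask application context. A minimal sketch, assuming an example bucket name:

    from flask import Flask
    from app.aws.s3 import get_job_location  # new helper introduced above

    app = Flask(__name__)
    app.config['CSV_UPLOAD_BUCKET_NAME'] = 'example-csv-upload-bucket'  # assumed value

    with app.app_context():
        # current_app now resolves, so the helper can build the (bucket, key) tuple,
        # which callers unpack with * into get_s3_object(bucket_name, file_location).
        bucket, key = get_job_location('service-uuid', 'job-uuid')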
@@ -5,6 +5,7 @@ from flask import (
     current_app
 )
 
+from app.aws.s3 import get_job_metadata_from_s3
 from app.dao.jobs_dao import (
     dao_create_job,
     dao_update_job,
@@ -119,11 +120,22 @@ def create_job(service_id):
     data.update({
         "service": service_id
     })
+    try:
+        data.update(
+            **get_job_metadata_from_s3(service_id, data['id'])
+        )
+    except KeyError:
+        raise InvalidRequest({'id': ['Missing data for required field.']}, status_code=400)
+
+    data['template'] = data.pop('template_id')
     template = dao_get_template_by_id(data['template'])
 
     if template.template_type == LETTER_TYPE and service.restricted:
         raise InvalidRequest("Create letter job is not allowed for service in trial mode ", 403)
 
+    if data.get('valid') != 'True':
+        raise InvalidRequest("File is not valid, can't create job", 400)
+
     errors = unarchived_template_schema.validate({'archived': template.archived})
 
     if errors:
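One detail worth calling out in the create_job change: because S3 user metadata values are always strings, the merged-in 'valid' flag is compared against the string 'True' rather than a boolean. A small standalone sketch of the merge-then-check flow, with invented values (the exact metadata keys attached at upload time are not shown in this diff):

    # Invented values; the real dict is whatever metadata was attached to the CSV in S3.
    data = {'id': 'job-uuid', 'service': 'service-uuid'}
    metadata = {'template_id': 'template-uuid', 'valid': 'True'}

    data.update(**metadata)

    # Every metadata value is a string, so anything other than the literal 'True' fails.
    if data.get('valid') != 'True':
        raise ValueError("File is not valid, can't create job")

    data['template'] = data.pop('template_id')
    print(data['template'])  # 'template-uuid'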
File diff suppressed because it is too large