Put PDF letter validation failure message in S3 metadata

So it can be used to tell the user why the letter has failed validation
Pea Tyczynska
2019-10-11 12:24:53 +01:00
parent 0152dcf254
commit 0a617379c4
3 changed files with 93 additions and 53 deletions
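
For context, here is a minimal sketch of how the changed helper might now be called when the scan/validation step finds a problem. The module path, filename and failure message are illustrative placeholders, not values taken from this commit:

    # Hypothetical caller; every literal below is a made-up example.
    from app.letters.utils import move_scan_to_invalid_pdf_bucket  # assumed module path

    move_scan_to_invalid_pdf_bucket(
        source_filename='2019-10-11/SOME-LETTER.PDF',
        message='letter-not-a4-portrait-oriented',  # stored as validation_failed_message
        invalid_pages=[1, 3],                       # pages that failed the check
        page_count=4,                               # stringified before upload
    )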


@@ -127,10 +127,21 @@ def move_error_pdf_to_scan_bucket(source_filename):
     _move_s3_object(scan_bucket, error_file, scan_bucket, source_filename)


-def move_scan_to_invalid_pdf_bucket(source_filename):
-    scan_bucket = current_app.config['LETTERS_SCAN_BUCKET_NAME']
-    invalid_pdf_bucket = current_app.config['INVALID_PDF_BUCKET_NAME']
-    _move_s3_object(scan_bucket, source_filename, invalid_pdf_bucket, source_filename)
+def move_scan_to_invalid_pdf_bucket(source_filename, message=None, invalid_pages=None, page_count=None):
+    metadata = {}
+    if message:
+        metadata["validation_failed_message"] = message
+    if invalid_pages:
+        metadata["invalid_pages"] = [str(p) for p in invalid_pages]
+    if page_count:
+        metadata["page_count"] = str(page_count)
+    _move_s3_object(
+        source_bucket=current_app.config['LETTERS_SCAN_BUCKET_NAME'],
+        source_filename=source_filename,
+        target_bucket=current_app.config['INVALID_PDF_BUCKET_NAME'],
+        target_filename=source_filename,
+        metadata=metadata
+    )


 def move_uploaded_pdf_to_letters_bucket(source_filename, upload_filename):
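
S3 user-defined metadata is a flat map of string keys to string values, which is presumably why page_count is stringified above; a genuine list value (as built for invalid_pages) would normally have to be serialised into a single string as well, e.g. joined or JSON-encoded. As a hypothetical sketch of the read side, this is roughly how the stored metadata could later be fetched so the failure reason can be shown to the user; the bucket name and key are placeholders rather than Notify configuration:

    # Sketch only: read back the metadata written by the move above.
    import boto3

    def get_invalid_letter_metadata(bucket_name, filename):
        obj = boto3.resource('s3').Object(bucket_name, filename)
        obj.load()  # HEAD request; raises ClientError if the object is missing
        # User metadata comes back lower-cased, without the x-amz-meta- prefix,
        # and with every value as a string.
        return obj.metadata

    metadata = get_invalid_letter_metadata('some-invalid-pdf-bucket', '2019-10-11/SOME-LETTER.PDF')
    reason = metadata.get('validation_failed_message', 'unknown')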
@@ -138,7 +149,7 @@ def move_uploaded_pdf_to_letters_bucket(source_filename, upload_filename):
         source_bucket=current_app.config['TRANSIENT_UPLOADED_LETTERS'],
         source_filename=source_filename,
         target_bucket=current_app.config['LETTERS_PDF_BUCKET_NAME'],
-        target_filename=upload_filename
+        target_filename=upload_filename,
     )
@@ -164,7 +175,7 @@ def get_letter_pdf(notification):
     return obj.get()["Body"].read()


-def _move_s3_object(source_bucket, source_filename, target_bucket, target_filename):
+def _move_s3_object(source_bucket, source_filename, target_bucket, target_filename, metadata=None):
     s3 = boto3.resource('s3')

     copy_source = {'Bucket': source_bucket, 'Key': source_filename}
@@ -174,7 +185,10 @@ def _move_s3_object(source_bucket, source_filename, target_bucket, target_filena
     # Tags are copied across but the expiration time is reset in the destination bucket
     # e.g. if a file has 5 days left to expire on a ONE_WEEK retention in the source bucket,
     # in the destination bucket the expiration time will be reset to 7 days left to expire
-    obj.copy(copy_source, ExtraArgs={'ServerSideEncryption': 'AES256'})
+    put_args = {'ServerSideEncryption': 'AES256'}
+    if metadata:
+        put_args['Metadata'] = metadata
+    obj.copy(copy_source, ExtraArgs=put_args)

     s3.Object(source_bucket, source_filename).delete()
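
One S3 detail worth flagging as an aside (not a claim about code outside this diff): for a single CopyObject request, user metadata supplied with the copy is only applied when MetadataDirective is 'REPLACE'; the default directive, COPY, carries the source object's metadata across and ignores the new values. A self-contained sketch of a copy that replaces metadata under that assumption, with placeholder names, could look like this:

    # Standalone sketch, not the commit's code: copy an object and replace
    # its user metadata in one step. Bucket and key names are placeholders.
    import boto3

    def copy_with_metadata(source_bucket, source_key, target_bucket, target_key, metadata=None):
        extra_args = {'ServerSideEncryption': 'AES256'}
        if metadata:
            extra_args['Metadata'] = metadata
            # CopyObject defaults to MetadataDirective=COPY, which keeps the
            # source object's metadata and ignores the values supplied here.
            extra_args['MetadataDirective'] = 'REPLACE'
        boto3.resource('s3').Object(target_bucket, target_key).copy(
            {'Bucket': source_bucket, 'Key': source_key},
            ExtraArgs=extra_args,
        )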