mirror of
https://github.com/GSA/notifications-api.git
synced 2025-12-22 16:31:15 -05:00
If we have too many returned letters, we'll exceed SQS's maximum task size of 256KB. Cap it to 5,000 — this is probably a bit conservative, but it follows the initial values we used when implementing this for the collate-letters task[^1]. Also follow the pattern of compressing the SQS payload just to reduce it a little more. [^1]: https://github.com/alphagov/notifications-api/pull/1536
28 lines
984 B
Python
28 lines
984 B
Python
from flask import Blueprint, jsonify, request
|
|
|
|
from app.celery.tasks import process_returned_letters_list
|
|
from app.config import QueueNames
|
|
from app.letters.letter_schemas import letter_references
|
|
from app.schema_validation import validate
|
|
from app.v2.errors import register_errors
|
|
|
|
# Blueprint grouping the returned-letters endpoints; register_errors wires up
# the shared v2 error handlers so exceptions become JSON API error responses.
letter_job = Blueprint("letter-job", __name__)


register_errors(letter_job)


# too many references will make SQS error (as the task can only be 256kb)
MAX_REFERENCES_PER_TASK = 5000
|
@letter_job.route('/letters/returned', methods=['POST'])
def create_process_returned_letters_job():
    """Accept a list of returned-letter references and queue them for processing.

    The request body is validated against the ``letter_references`` schema.
    The references are fanned out to the ``process_returned_letters_list``
    Celery task in capped batches, so that no single (zlib-compressed) SQS
    message exceeds the 256kb task-size limit.

    Returns a 200 response echoing back all of the submitted references.
    """
    references = validate(request.get_json(), letter_references)['references']

    # Split into batches of at most MAX_REFERENCES_PER_TASK; an empty list
    # of references simply produces no tasks.
    batches = [
        references[offset:offset + MAX_REFERENCES_PER_TASK]
        for offset in range(0, len(references), MAX_REFERENCES_PER_TASK)
    ]

    for batch in batches:
        process_returned_letters_list.apply_async(
            args=(batch, ),
            queue=QueueNames.DATABASE,
            compression='zlib'
        )

    return jsonify(references=references), 200