2018-02-20 11:22:17 +00:00
|
|
|
|
import csv
|
2018-02-06 11:02:54 +00:00
|
|
|
|
import os
|
2016-02-22 17:17:18 +00:00
|
|
|
|
import re
|
2016-10-27 17:31:13 +01:00
|
|
|
|
import unicodedata
|
2017-07-14 11:13:37 +01:00
|
|
|
|
from collections import namedtuple
|
2017-06-12 17:21:25 +01:00
|
|
|
|
from datetime import datetime, timedelta, timezone
|
2018-02-20 11:22:17 +00:00
|
|
|
|
from functools import wraps
|
|
|
|
|
|
from io import StringIO
|
|
|
|
|
|
from itertools import chain
|
|
|
|
|
|
from os import path
|
|
|
|
|
|
from urllib.parse import urlparse
|
2016-10-27 17:31:13 +01:00
|
|
|
|
|
2017-06-12 17:21:25 +01:00
|
|
|
|
import ago
|
2018-02-20 11:22:17 +00:00
|
|
|
|
import dateutil
|
2017-06-12 17:21:25 +01:00
|
|
|
|
import pyexcel
|
2018-02-20 11:22:17 +00:00
|
|
|
|
import pytz
|
|
|
|
|
|
import yaml
|
|
|
|
|
|
from flask import abort, current_app, redirect, request, session, url_for
|
|
|
|
|
|
from flask_login import current_user
|
2018-02-16 11:35:36 +00:00
|
|
|
|
from notifications_utils.recipients import RecipientCSV
|
2016-12-08 11:50:59 +00:00
|
|
|
|
from notifications_utils.template import (
|
|
|
|
|
|
EmailPreviewTemplate,
|
2017-04-28 16:04:52 +01:00
|
|
|
|
LetterImageTemplate,
|
2016-12-20 14:38:34 +00:00
|
|
|
|
LetterPreviewTemplate,
|
2018-02-20 11:22:17 +00:00
|
|
|
|
SMSPreviewTemplate,
|
2016-12-08 11:50:59 +00:00
|
|
|
|
)
|
2017-12-30 16:54:39 +00:00
|
|
|
|
from orderedset._orderedset import OrderedSet
|
|
|
|
|
|
from werkzeug.datastructures import MultiDict
|
2016-02-19 16:38:04 +00:00
|
|
|
|
|
2018-03-19 15:25:26 +00:00
|
|
|
|
# Groupings of notification statuses, used when filtering the dashboard and
# building CSV reports.

# Statuses for notifications still being processed.
SENDING_STATUSES = ['created', 'pending', 'sending', 'pending-virus-check']

# Statuses for notifications that reached the recipient.
DELIVERED_STATUSES = ['delivered', 'sent']

# Terminal statuses for notifications that will never be delivered.
FAILURE_STATUSES = ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure', 'virus-scan-failed']

# Every status a user can filter by — the union of the groups above.
REQUESTED_STATUSES = SENDING_STATUSES + DELIVERED_STATUSES + FAILURE_STATUSES
|
|
|
|
|
|
|
|
|
|
|
|
|
2018-02-28 18:13:29 +00:00
|
|
|
|
def user_has_permissions(*permissions, **permission_kwargs):
    """Decorator factory restricting a view to users with given permissions.

    Aborts with 401 when nobody is signed in, and with 403 when the
    signed-in user lacks the required permissions. `permissions` and
    `permission_kwargs` are forwarded to `current_user.has_permissions`.
    """
    def decorator(view):
        @wraps(view)
        def decorated_view(*args, **kwargs):
            # Guard clauses: reject before ever calling the view.
            if not (current_user and current_user.is_authenticated):
                abort(401)
            if not current_user.has_permissions(
                *permissions,
                **permission_kwargs
            ):
                abort(403)
            return view(*args, **kwargs)
        return decorated_view
    return decorator
|
2016-03-07 18:47:05 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-02-27 16:45:20 +00:00
|
|
|
|
def user_is_platform_admin(f):
    """Decorator restricting a view to platform admins.

    Aborts with 401 for anonymous users and 403 for signed-in users who
    aren't platform admins.
    """
    @wraps(f)
    def checked(*args, **kwargs):
        if current_user.is_authenticated:
            if current_user.platform_admin:
                return f(*args, **kwargs)
            abort(403)
        abort(401)
    return checked
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-06-17 11:36:30 +01:00
|
|
|
|
def redirect_to_sign_in(f):
    """Decorator sending users without session sign-in details to the
    sign-in page instead of running the view."""
    @wraps(f)
    def inner(*args, **kwargs):
        if 'user_details' in session:
            return f(*args, **kwargs)
        return redirect(url_for('main.sign_in'))
    return inner
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-03-07 18:47:05 +00:00
|
|
|
|
def get_errors_for_csv(recipients, template_type):
    """Summarise the problems in an uploaded recipients file.

    Args:
        recipients: parsed upload exposing `rows_with_bad_recipients` and
            `rows_with_missing_data` iterables.
        template_type: 'sms', 'email' or 'letter' — selects the noun used
            in the bad-recipient message; other values produce no
            bad-recipient message (matching the old elif chain).

    Returns:
        list of human-readable error strings; empty when nothing is wrong.
    """
    # Singular/plural noun for each template type's recipient column.
    nouns = {
        'sms': ('phone number', 'phone numbers'),
        'email': ('email address', 'email addresses'),
        'letter': ('address', 'addresses'),
    }.get(template_type)

    errors = []

    # Count each property exactly once: the old code called any() and then
    # len(list()) on the same attribute, which double-iterates and would
    # under-count if the attribute were a single-pass generator.
    number_of_bad_recipients = len(list(recipients.rows_with_bad_recipients))
    if number_of_bad_recipients and nouns:
        singular, plural = nouns
        if number_of_bad_recipients == 1:
            errors.append("fix 1 {}".format(singular))
        else:
            errors.append("fix {} {}".format(number_of_bad_recipients, plural))

    number_of_rows_with_missing_data = len(list(recipients.rows_with_missing_data))
    if number_of_rows_with_missing_data == 1:
        errors.append("enter missing data in 1 row")
    elif number_of_rows_with_missing_data:
        errors.append("enter missing data in {} rows".format(number_of_rows_with_missing_data))

    return errors
|
2016-03-16 16:57:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
2017-01-13 11:35:27 +00:00
|
|
|
|
def generate_notifications_csv(**kwargs):
    """Stream a CSV report of a service's notifications, page by page.

    Keyword args are passed straight through to
    `notification_api_client.get_notifications_for_service`; `service_id`
    is required, and `job_id` plus `template_type` switch the report into
    per-job mode, where each row also echoes the columns of the user's
    original upload. Yields CSV text: a header line first, then one CSV
    chunk per notification.
    """
    # Imported lazily to avoid circular imports at module load time.
    from app import notification_api_client
    from app.main.s3_client import s3download
    if 'page' not in kwargs:
        kwargs['page'] = 1

    if kwargs.get('job_id'):
        # Per-job report: re-download the original upload from S3 so we can
        # replay the user's own columns alongside each notification's status.
        original_file_contents = s3download(kwargs['service_id'], kwargs['job_id'])
        original_upload = RecipientCSV(
            original_file_contents,
            template_type=kwargs['template_type'],
        )
        original_column_headers = original_upload.column_headers
        fieldnames = ['Row number'] + original_column_headers + ['Template', 'Type', 'Job', 'Status', 'Time']
    else:
        # NOTE(review): this header has 6 columns but the non-job rows below
        # emit 7 values (both created_at and updated_at) — confirm whether a
        # column name is missing here.
        fieldnames = ['Recipient', 'Template', 'Type', 'Job', 'Status', 'Time']

    yield ','.join(fieldnames) + '\n'

    # Fetch one API page at a time so large services don't need the whole
    # result set in memory; the generator ends when no 'next' link remains.
    while kwargs['page']:
        notifications_resp = notification_api_client.get_notifications_for_service(**kwargs)
        for notification in notifications_resp['notifications']:
            if kwargs.get('job_id'):
                values = [
                    notification['row_number'],
                ] + [
                    # row_number is 1-based; RecipientCSV rows are 0-indexed
                    original_upload[notification['row_number'] - 1].get(header).data
                    for header in original_column_headers
                ] + [
                    notification['template_name'],
                    notification['template_type'],
                    notification['job_name'],
                    notification['status'],
                    notification['created_at'],
                ]
            else:
                values = [
                    notification['to'],
                    notification['template']['name'],
                    notification['template']['template_type'],
                    notification.get('job_name', None),
                    notification['status'],
                    notification['created_at'],
                    notification['updated_at']
                ]
            # Reuse Spreadsheet so quoting/escaping matches other CSV output.
            yield Spreadsheet.from_rows([map(str, values)]).as_csv_data

        if notifications_resp['links'].get('next'):
            kwargs['page'] += 1
        else:
            return
    raise Exception("Should never reach here")
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-03-16 16:57:10 +00:00
|
|
|
|
def get_page_from_request():
    """Return the requested page number.

    Reads the `page` query argument: returns 1 when it is absent, its
    integer value when valid, and None when it isn't a valid integer.
    """
    requested = request.args.get('page')
    if requested is None:
        return 1
    try:
        return int(requested)
    except ValueError:
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-10-10 14:50:49 +01:00
|
|
|
|
def generate_previous_dict(view, service_id, page, url_args=None):
    """Pagination link metadata for the page before `page`."""
    extra_args = url_args if url_args else {}
    return generate_previous_next_dict(view, service_id, page - 1, 'Previous page', extra_args)
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_next_dict(view, service_id, page, url_args=None):
    """Pagination link metadata for the page after `page`."""
    extra_args = url_args if url_args else {}
    return generate_previous_next_dict(view, service_id, page + 1, 'Next page', extra_args)
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_previous_next_dict(view, service_id, page, title, url_args):
    """Build the link dict consumed by the pagination template partial."""
    return dict(
        url=url_for(view, service_id=service_id, page=page, **url_args),
        title=title,
        label='page {}'.format(page),
    )
|
2016-03-30 17:12:00 +01:00
|
|
|
|
|
|
|
|
|
|
|
2016-10-07 10:59:32 +01:00
|
|
|
|
def email_safe(string, whitespace='.'):
    """Turn a display name into something usable as an email local part.

    Accents are stripped, runs of whitespace become `whitespace`,
    characters that are neither alphanumeric nor the separator are
    dropped, and repeated/leading/trailing dots are tidied away.
    """
    # Decompose to NFD and drop combining marks ('Mn') to strip accents.
    decomposed = unicodedata.normalize('NFD', string)
    without_accents = ''.join(
        char for char in decomposed if unicodedata.category(char) != 'Mn'
    )
    collapsed = re.sub(r'\s+', whitespace, without_accents.strip())
    kept = []
    for char in collapsed:
        if char.isalnum() or char == whitespace:
            kept.append(char.lower())
    deduplicated = re.sub(r'\.{2,}', '.', ''.join(kept))
    return deduplicated.strip('.')
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Spreadsheet:
    """Wraps an uploaded spreadsheet and exposes its contents as CSV data.

    `.csv` and `.tsv` uploads are normalised directly; the binary formats
    are read with pyexcel and re-serialised as CSV.
    """

    # Upload types we know how to turn into CSV.
    allowed_file_extensions = ['csv', 'xlsx', 'xls', 'ods', 'xlsm', 'tsv']

    def __init__(self, csv_data, filename=''):
        self.filename = filename
        self.as_csv_data = csv_data
        self.as_dict = {
            'file_name': self.filename,
            'data': self.as_csv_data,
        }

    @classmethod
    def can_handle(cls, filename):
        """Whether `filename` has one of the supported extensions."""
        return cls.get_extension(filename) in cls.allowed_file_extensions

    @staticmethod
    def get_extension(filename):
        """Lower-cased extension of `filename`, without the leading dot."""
        return path.splitext(filename)[1].lower().lstrip('.')

    @staticmethod
    def normalise_newlines(file_content):
        """Decode an uploaded file as UTF-8 and force CRLF line endings."""
        lines = file_content.read().decode('utf-8').splitlines()
        return '\r\n'.join(lines)

    @classmethod
    def from_rows(cls, rows, filename=''):
        """Build an instance by serialising an iterable of rows as CSV."""
        with StringIO() as buffer:
            writer = csv.writer(buffer)
            writer.writerows(rows)
            return cls(buffer.getvalue(), filename)

    @classmethod
    def from_dict(cls, dictionary, filename=''):
        """Build a two-row spreadsheet: sorted keys, then their values."""
        pairs = sorted(dictionary.items(), key=lambda pair: pair[0])
        return cls.from_rows(zip(*pairs), filename)

    @classmethod
    def from_file(cls, file_content, filename=''):
        """Build an instance from an uploaded file of any supported type."""
        extension = cls.get_extension(filename)

        if extension == 'csv':
            return cls(Spreadsheet.normalise_newlines(file_content), filename)

        if extension == 'tsv':
            # pyexcel needs a text stream for tab-separated files
            file_content = StringIO(
                Spreadsheet.normalise_newlines(file_content))

        instance = cls.from_rows(
            pyexcel.iget_array(
                file_type=extension,
                file_stream=file_content),
            filename)
        # iget_array streams lazily; release its handles once consumed.
        pyexcel.free_resources()
        return instance
|
2016-07-05 11:39:07 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_help_argument():
    """Return the `help` query argument when it is '1', '2' or '3'; else None."""
    help_argument = request.args.get('help')
    if help_argument in ('1', '2', '3'):
        return help_argument
    return None
|
2016-10-25 18:10:15 +01:00
|
|
|
|
|
|
|
|
|
|
|
2016-10-28 10:45:05 +01:00
|
|
|
|
def is_gov_user(email_address):
    """True when `email_address` is on a known government email domain."""
    try:
        GovernmentEmailDomain(email_address)
    except NotGovernmentEmailDomain:
        return False
    else:
        return True
|
2016-12-05 11:51:19 +00:00
|
|
|
|
|
|
|
|
|
|
|
2016-12-20 14:38:34 +00:00
|
|
|
|
def get_template(
    template,
    service,
    show_recipient=False,
    expand_emails=False,
    letter_preview_url=None,
    page_count=1,
    redact_missing_personalisation=False,
    email_reply_to=None,
    sms_sender=None,
):
    """Instantiate the right preview class for a template dict.

    Returns an EmailPreviewTemplate, SMSPreviewTemplate,
    LetterImageTemplate (when `letter_preview_url` is given) or
    LetterPreviewTemplate. Unknown template types fall through and
    return None.
    """
    template_type = template['template_type']

    if template_type == 'email':
        return EmailPreviewTemplate(
            template,
            from_name=service['name'],
            from_address='{}@notifications.service.gov.uk'.format(service['email_from']),
            expanded=expand_emails,
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
            reply_to=email_reply_to,
        )

    if template_type == 'sms':
        return SMSPreviewTemplate(
            template,
            prefix=service['name'],
            show_prefix=service['prefix_sms'],
            sender=sms_sender,
            # only show the sender block when one was actually supplied
            show_sender=bool(sms_sender),
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
        )

    if template_type == 'letter':
        if letter_preview_url:
            return LetterImageTemplate(
                template,
                image_url=letter_preview_url,
                page_count=int(page_count),
                contact_block=template['reply_to_text']
            )
        return LetterPreviewTemplate(
            template,
            contact_block=template['reply_to_text'],
            admin_base_url=current_app.config['ADMIN_BASE_URL'],
            redact_missing_personalisation=redact_missing_personalisation,
        )
|
2016-12-28 11:06:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2017-01-25 15:59:06 +00:00
|
|
|
|
def get_current_financial_year():
    """Return the calendar year the current UK financial year started in.

    The financial year runs April–March, so January–March still belong to
    the previous calendar year's financial year.
    """
    now = datetime.utcnow()
    # Use the datetime attributes directly: the old strftime('%-m') relies
    # on a glibc extension that raises on Windows, and round-trips through
    # strings for no benefit.
    return now.year if now.month > 3 else now.year - 1
|
2017-06-12 17:21:25 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_time_left(created_at):
    """Human-readable countdown until a notification's data is removed.

    Data is kept for a week after the day of sending; the cut-off is just
    before midnight, 8 days after `created_at`.
    """
    end_of_today = datetime.now(timezone.utc).replace(hour=23, minute=59, second=59)
    retention_deadline = dateutil.parser.parse(created_at) + timedelta(days=8)
    return ago.human(
        end_of_today - retention_deadline,
        future_tense='Data available for {}',
        past_tense='Data no longer available',  # No-one should ever see this
        precision=1
    )
|
2017-07-04 17:25:35 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def email_or_sms_not_enabled(template_type, permissions):
    """True when `template_type` is email/sms but missing from `permissions`."""
    if template_type not in ('email', 'sms'):
        return False
    return template_type not in permissions
|
2017-07-11 17:06:15 +01:00
|
|
|
|
|
|
|
|
|
|
|
2017-07-14 11:13:37 +01:00
|
|
|
|
def get_letter_timings(upload_time):
    """Work out the print and delivery milestones for an uploaded letter.

    Args:
        upload_time: timestamp string of when the letter was uploaded
            (parsed by `gmt_timezones`).

    Returns:
        LetterTimings(printed_by, is_printed, earliest_delivery,
        latest_delivery), with datetimes in Europe/London.
    """
    LetterTimings = namedtuple(
        'LetterTimings',
        'printed_by, is_printed, earliest_delivery, latest_delivery'
    )

    # shift anything after 5pm to the next day
    processing_day = gmt_timezones(upload_time) + timedelta(hours=(7))

    # Per-weekday offsets (print day, earliest delivery, latest delivery);
    # days not listed fall back to (1, 3, 4).
    print_day, earliest_delivery, latest_delivery = (
        processing_day + timedelta(days=days)
        for days in {
            'Wednesday': (1, 3, 5),
            'Thursday': (1, 4, 5),
            'Friday': (3, 5, 6),
            'Saturday': (2, 4, 5),
        }.get(processing_day.strftime('%A'), (1, 3, 4))
    )

    printed_by = print_day.astimezone(pytz.timezone('Europe/London')).replace(hour=15, minute=0)
    # Bug fix: previously this was
    #   datetime.utcnow().replace(tzinfo=pytz.timezone('Europe/London'))
    # which mislabels a UTC wall-clock as London time AND attaches pytz's
    # LMT base offset (UTC-00:01), making `is_printed` wrong by up to an
    # hour during BST. A properly-aware UTC "now" compares correctly
    # against the London-aware `printed_by`.
    now = datetime.now(pytz.utc)

    return LetterTimings(
        printed_by=printed_by,
        is_printed=(now > printed_by),
        earliest_delivery=earliest_delivery,
        latest_delivery=latest_delivery,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def gmt_timezones(date):
    """Parse a timestamp string, treat it as UTC, convert to Europe/London."""
    parsed = dateutil.parser.parse(date)
    # Any offset in the input is discarded: the value is assumed to be UTC.
    as_utc = parsed.replace(tzinfo=pytz.utc)
    return as_utc.astimezone(pytz.timezone('Europe/London'))
|
2017-07-24 15:20:40 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_cdn_domain():
    """Domain serving static logo assets, derived from ADMIN_BASE_URL."""
    parsed_uri = urlparse(current_app.config['ADMIN_BASE_URL'])

    # Local development has no CDN of its own; use the tools environment's.
    if parsed_uri.netloc.startswith('localhost'):
        return 'static-logos.notify.tools'

    # Replace the first label of the host with 'static-logos'.
    _subdomain, _dot, remainder = parsed_uri.netloc.partition('.')
    return "static-logos.{}".format(remainder)
|
2017-12-30 16:54:39 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_filter_args(filter_dict):
    """Normalise request filter args into a MultiDict of comma-split values.

    Values for each key are joined and re-split on commas, so `?a=x,y`
    and `?a=x&a=y` come out the same. Keys whose values are all empty
    strings are dropped entirely.
    """
    if not isinstance(filter_dict, MultiDict):
        filter_dict = MultiDict(filter_dict)

    parsed = []
    for key in filter_dict.keys():
        raw_values = filter_dict.getlist(key)
        if not ''.join(raw_values):
            # every value empty — drop this key
            continue
        parsed.append((key, (','.join(raw_values)).split(',')))

    return MultiDict(parsed)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def set_status_filters(filter_args):
    """Expand status filters into the full list of matching API statuses.

    'delivered', 'sending' and 'failed' each stand for a family of
    statuses; no filter at all means every requested status. Order is
    preserved and duplicates removed.
    """
    status_filters = filter_args.get('status', [])
    selected = list(status_filters or REQUESTED_STATUSES)
    if 'delivered' in status_filters:
        selected += DELIVERED_STATUSES
    if 'sending' in status_filters:
        selected += SENDING_STATUSES
    if 'failed' in status_filters:
        selected += FAILURE_STATUSES
    return list(OrderedSet(selected))
|
2018-02-06 09:29:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-02-06 16:55:00 +00:00
|
|
|
|
# Directory containing this module — used to locate the bundled YAML data
# files (domains.yml, email_domains.yml) regardless of the working directory.
_dir_path = os.path.dirname(os.path.realpath(__file__))
|
2018-02-06 09:29:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-03-09 14:53:04 +00:00
|
|
|
|
class AgreementInfo:
    """What we know about the organisation behind an email domain.

    The lookup table is loaded once, at class-definition time, from the
    bundled domains.yml. Each entry maps a domain to either a dict of
    details or a string naming another domain whose record should be used.
    """

    with open('{}/domains.yml'.format(_dir_path)) as domains:
        # `domains` is rebound from the file handle to the parsed mapping
        domains = yaml.safe_load(domains)
    # Longest first, so the most specific domain wins the first-match
    # search in __init__.
    domain_names = sorted(domains.keys(), key=len, reverse=True)

    def __init__(self, email_address_or_domain):
        # First (most specific) known domain covering the input, or None.
        self._match = next(filter(
            self.get_matching_function(email_address_or_domain),
            self.domain_names,
        ), None)

        (
            self.owner,
            self.crown_status,
            self.agreement_signed
        ) = self._get_info()

    @classmethod
    def from_user(cls, user):
        # Anonymous users get the empty string, which matches no domain.
        return cls(user.email_address if user.is_authenticated else '')

    @classmethod
    def from_current_user(cls):
        return cls.from_user(current_user)

    @property
    def as_human_readable(self):
        """One-line description of the agreement status, for display."""
        if self.agreement_signed:
            return 'Yes, on behalf of {}'.format(self.owner)
        elif self.owner:
            # agreement_signed is False or None here; True was handled above
            return '{} (organisation is {}, {})'.format(
                {
                    False: 'No',
                    None: 'Can’t tell',
                }.get(self.agreement_signed),
                self.owner,
                {
                    True: 'a crown body',
                    False: 'a non-crown body',
                    None: 'crown status unknown',
                }.get(self.crown_status),
            )
        else:
            return 'Can’t tell'

    @property
    def crown_status_or_404(self):
        # Pages that need a definite crown status 404 when we can't tell.
        if self.crown_status is None:
            abort(404)
        return self.crown_status

    def as_request_for_agreement(self, with_owner=False):
        """Boilerplate text asking for a copy of the agreement to sign."""
        if with_owner and self.owner:
            return (
                'Please send me a copy of the GOV.UK Notify data sharing '
                'and financial agreement for {} to sign.'.format(self.owner)
            )
        return (
            'Please send me a copy of the GOV.UK Notify data sharing '
            'and financial agreement.'
        )

    @staticmethod
    def get_matching_function(email_address_or_domain):
        """Build a predicate testing whether a known domain covers the input."""

        email_address_or_domain = email_address_or_domain.lower()

        def fn(domain):
            # Matches the domain itself, an address at it, or any subdomain.
            return (
                email_address_or_domain == domain
            ) or (
                email_address_or_domain.endswith("@{}".format(domain))
            ) or (
                email_address_or_domain.endswith(".{}".format(domain))
            )

        return fn

    def _get_info(self):
        """Return (owner, crown_status, agreement_signed) for the match.

        No match yields an empty dict, so all three come back as None.
        NOTE(review): a details value that is neither str nor dict would
        make this return None and break the unpack in __init__ — assumed
        impossible given domains.yml's format; confirm.
        """
        details = self.domains.get(self._match) or {}

        # A string entry is an alias: defer to that domain's record.
        if isinstance(details, str):
            return AgreementInfo(details)._get_info()

        elif isinstance(details, dict):
            return(
                details.get("owner"),
                details.get("crown"),
                details.get("agreement_signed"),
            )
|
|
|
|
|
|
|
2018-02-06 16:55:00 +00:00
|
|
|
|
|
|
|
|
|
|
class NotGovernmentEmailDomain(Exception):
    """Raised when an email address matches no known government domain."""
|
|
|
|
|
|
|
|
|
|
|
|
|
2018-03-09 14:53:04 +00:00
|
|
|
|
class GovernmentEmailDomain(AgreementInfo):
    """AgreementInfo restricted to domains allowed to sign up.

    Swaps the lookup list for the bundled email_domains.yml, and raises
    NotGovernmentEmailDomain instead of silently matching nothing.
    """

    with open('{}/email_domains.yml'.format(_dir_path)) as email_domains:
        # Parsed at class-definition time; overrides AgreementInfo's list.
        domain_names = yaml.safe_load(email_domains)

    def __init__(self, email_address_or_domain):
        # Unlike the parent, no default: an unmatched address is an error.
        try:
            self._match = next(filter(
                self.get_matching_function(email_address_or_domain),
                self.domain_names,
            ))
        except StopIteration:
            raise NotGovernmentEmailDomain()