2018-02-20 11:22:17 +00:00
|
|
|
|
import csv
|
2018-02-06 11:02:54 +00:00
|
|
|
|
import os
|
2016-02-22 17:17:18 +00:00
|
|
|
|
import re
|
2016-10-27 17:31:13 +01:00
|
|
|
|
import unicodedata
|
2017-06-12 17:21:25 +01:00
|
|
|
|
from datetime import datetime, timedelta, timezone
|
2018-02-20 11:22:17 +00:00
|
|
|
|
from functools import wraps
|
|
|
|
|
|
from io import StringIO
|
|
|
|
|
|
from itertools import chain
|
|
|
|
|
|
from os import path
|
|
|
|
|
|
from urllib.parse import urlparse
|
2016-10-27 17:31:13 +01:00
|
|
|
|
|
2017-06-12 17:21:25 +01:00
|
|
|
|
import ago
|
2018-02-20 11:22:17 +00:00
|
|
|
|
import dateutil
|
2017-06-12 17:21:25 +01:00
|
|
|
|
import pyexcel
|
2018-02-20 11:22:17 +00:00
|
|
|
|
import yaml
|
2018-03-27 11:13:09 +01:00
|
|
|
|
from flask import (
|
|
|
|
|
|
Markup,
|
|
|
|
|
|
abort,
|
|
|
|
|
|
current_app,
|
|
|
|
|
|
redirect,
|
|
|
|
|
|
request,
|
|
|
|
|
|
session,
|
|
|
|
|
|
url_for,
|
|
|
|
|
|
)
|
2018-02-20 11:22:17 +00:00
|
|
|
|
from flask_login import current_user
|
2018-08-23 16:11:08 +01:00
|
|
|
|
from notifications_utils.field import Field
|
2018-07-02 09:08:21 +01:00
|
|
|
|
from notifications_utils.formatters import make_quotes_smart
|
2018-02-16 11:35:36 +00:00
|
|
|
|
from notifications_utils.recipients import RecipientCSV
|
2018-07-11 13:31:38 +01:00
|
|
|
|
from notifications_utils.take import Take
|
2016-12-08 11:50:59 +00:00
|
|
|
|
from notifications_utils.template import (
|
|
|
|
|
|
EmailPreviewTemplate,
|
2017-04-28 16:04:52 +01:00
|
|
|
|
LetterImageTemplate,
|
2016-12-20 14:38:34 +00:00
|
|
|
|
LetterPreviewTemplate,
|
2018-02-20 11:22:17 +00:00
|
|
|
|
SMSPreviewTemplate,
|
2016-12-08 11:50:59 +00:00
|
|
|
|
)
|
2017-12-30 16:54:39 +00:00
|
|
|
|
from orderedset._orderedset import OrderedSet
|
|
|
|
|
|
from werkzeug.datastructures import MultiDict
|
2016-02-19 16:38:04 +00:00
|
|
|
|
|
2018-03-19 15:25:26 +00:00
|
|
|
|
# Notification status groupings, as reported by the notifications API.
# Statuses for messages still being processed or sent.
SENDING_STATUSES = ['created', 'pending', 'sending', 'pending-virus-check']
# Statuses for messages that reached the recipient (or, for letters, were returned).
DELIVERED_STATUSES = ['delivered', 'sent', 'returned-letter']
# Statuses for messages that failed, temporarily or permanently.
FAILURE_STATUSES = ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure', 'virus-scan-failed']
# Every status a user can have requested/filtered on.
REQUESTED_STATUSES = SENDING_STATUSES + DELIVERED_STATUSES + FAILURE_STATUSES
|
|
|
|
|
|
|
|
|
|
|
|
|
2018-02-28 18:13:29 +00:00
|
|
|
|
def user_has_permissions(*permissions, **permission_kwargs):
    """Decorator factory: only allow through users holding the given permissions.

    Aborts with 401 for anonymous users and 403 for authenticated users
    lacking the permissions; otherwise calls the wrapped view.
    """
    def wrap(func):
        @wraps(func)
        def wrap_func(*args, **kwargs):
            # Guard clauses: reject unauthenticated callers first, then
            # authenticated callers without the required permissions.
            if not (current_user and current_user.is_authenticated):
                abort(401)
            if not current_user.has_permissions(*permissions, **permission_kwargs):
                abort(403)
            return func(*args, **kwargs)
        return wrap_func
    return wrap
|
2016-03-07 18:47:05 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-02-27 16:45:20 +00:00
|
|
|
|
def user_is_platform_admin(f):
    """Decorator: only allow signed-in platform admins to call the view."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        if current_user.is_authenticated:
            if current_user.platform_admin:
                return f(*args, **kwargs)
            # Signed in but not an admin.
            abort(403)
        # Not signed in at all.
        abort(401)
    return wrapped
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-06-17 11:36:30 +01:00
|
|
|
|
def redirect_to_sign_in(f):
    """Decorator: bounce users without in-progress sign-in details to the sign-in page."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        if 'user_details' in session:
            return f(*args, **kwargs)
        return redirect(url_for('main.sign_in'))
    return wrapped
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-03-07 18:47:05 +00:00
|
|
|
|
def get_errors_for_csv(recipients, template_type):
    """Return human-readable error messages for an uploaded recipient list.

    ``recipients`` is expected to expose ``rows_with_bad_recipients`` and
    ``rows_with_missing_data`` iterables (as ``RecipientCSV`` does);
    ``template_type`` is 'sms', 'email' or 'letter'.

    Returns a list of zero, one or two messages: one about invalid
    recipients (worded per channel) and one about rows with missing data.
    """
    # Channel-specific (singular, plural) nouns for the bad-recipient
    # message; replaces three near-identical if/elif ladders.
    recipient_nouns = {
        'sms': ('phone number', 'phone numbers'),
        'email': ('email address', 'email addresses'),
        'letter': ('address', 'addresses'),
    }

    errors = []

    # Materialise each iterable exactly once, so this also works if the
    # attributes are one-shot generators rather than fresh properties.
    number_of_bad_recipients = len(list(recipients.rows_with_bad_recipients))
    if number_of_bad_recipients and template_type in recipient_nouns:
        singular, plural = recipient_nouns[template_type]
        noun = singular if number_of_bad_recipients == 1 else plural
        errors.append("fix {} {}".format(number_of_bad_recipients, noun))

    number_of_rows_with_missing_data = len(list(recipients.rows_with_missing_data))
    if number_of_rows_with_missing_data == 1:
        errors.append("enter missing data in 1 row")
    elif number_of_rows_with_missing_data:
        errors.append("enter missing data in {} rows".format(number_of_rows_with_missing_data))

    return errors
|
2016-03-16 16:57:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
2017-01-13 11:35:27 +00:00
|
|
|
|
def generate_notifications_csv(**kwargs):
    """Yield a CSV report of notifications, one chunk of text at a time.

    ``kwargs`` are the query parameters accepted by
    ``notification_api_client.get_notifications_for_service`` (at least
    ``service_id``; plus ``job_id`` and ``template_type`` when reporting
    on a specific upload). Yields the header row first, then one
    CSV-encoded line per notification, paging through the API until the
    last page.
    """
    # Imported here (not at module level) to avoid a circular import with app.
    from app import notification_api_client
    from app.main.s3_client import s3download

    if 'page' not in kwargs:
        kwargs['page'] = 1

    if kwargs.get('job_id'):
        # Reporting on one uploaded file: echo the user's original columns
        # back, followed by metadata about how each row was sent.
        original_file_contents = s3download(kwargs['service_id'], kwargs['job_id'])
        original_upload = RecipientCSV(
            original_file_contents,
            template_type=kwargs['template_type'],
        )
        original_column_headers = original_upload.column_headers
        fieldnames = ['Row number'] + original_column_headers + ['Template', 'Type', 'Job', 'Status', 'Time']
    else:
        fieldnames = ['Recipient', 'Template', 'Type', 'Sent by', 'Job', 'Status', 'Time']

    yield ','.join(fieldnames) + '\n'

    while kwargs['page']:
        # One API call per page of notifications.
        notifications_resp = notification_api_client.get_notifications_for_service(**kwargs)
        for notification in notifications_resp['notifications']:
            if kwargs.get('job_id'):
                values = [
                    notification['row_number'],
                ] + [
                    # row_number is 1-based; look up the original row so the
                    # report reproduces exactly what the user uploaded.
                    original_upload[notification['row_number'] - 1].get(header).data
                    for header in original_column_headers
                ] + [
                    notification['template_name'],
                    notification['template_type'],
                    notification['job_name'],
                    notification['status'],
                    notification['created_at'],
                ]
            else:
                values = [
                    notification['recipient'],
                    notification['template_name'],
                    notification['template_type'],
                    notification['created_by_name'] or '',
                    notification['job_name'] or '',
                    notification['status'],
                    notification['created_at']
                ]
            # Serialise through Spreadsheet so quoting/escaping matches the
            # rest of the app's CSV output.
            yield Spreadsheet.from_rows([map(str, values)]).as_csv_data

        if notifications_resp['links'].get('next'):
            kwargs['page'] += 1
        else:
            return
    # The while condition only becomes falsy if someone zeroes kwargs['page'],
    # which nothing here does — the loop should always exit via `return`.
    raise Exception("Should never reach here")
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-03-16 16:57:10 +00:00
|
|
|
|
def get_page_from_request():
    """Parse the ?page= query argument.

    Returns the page as an int, 1 when the argument is absent, or None
    when it is present but not a valid integer.
    """
    if 'page' not in request.args:
        return 1
    try:
        return int(request.args['page'])
    except ValueError:
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-10-10 14:50:49 +01:00
|
|
|
|
def generate_previous_dict(view, service_id, page, url_args=None):
    """Pagination metadata for the page before ``page``."""
    previous_page = page - 1
    return generate_previous_next_dict(view, service_id, previous_page, 'Previous page', url_args or {})
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_next_dict(view, service_id, page, url_args=None):
    """Pagination metadata for the page after ``page``."""
    next_page = page + 1
    return generate_previous_next_dict(view, service_id, next_page, 'Next page', url_args or {})
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_previous_next_dict(view, service_id, page, title, url_args):
    """Build the url/title/label dict used by the pagination component."""
    link = url_for(view, service_id=service_id, page=page, **url_args)
    return {
        'url': link,
        'title': title,
        'label': 'page {}'.format(page),
    }
|
2016-03-30 17:12:00 +01:00
|
|
|
|
|
|
|
|
|
|
|
2016-10-07 10:59:32 +01:00
|
|
|
|
def email_safe(string, whitespace='.'):
    """Turn an arbitrary string into a lowercase, ASCII-ish slug.

    Accents are stripped, runs of whitespace become the ``whitespace``
    character, and anything that isn't alphanumeric (or the whitespace
    character itself) is dropped. Repeated and surrounding dots are
    collapsed/trimmed.
    """
    # Strip accents/diacritics: decompose, then drop combining marks.
    decomposed = unicodedata.normalize('NFD', string)
    without_marks = ''.join(
        character for character in decomposed
        if unicodedata.category(character) != 'Mn'
    )

    # Replace whitespace runs, then keep only alphanumerics (lowercased)
    # and the separator character.
    separated = re.sub(r'\s+', whitespace, without_marks.strip())
    kept = [
        character.lower()
        for character in separated
        if character.isalnum() or character == whitespace
    ]

    collapsed = re.sub(r'\.{2,}', '.', ''.join(kept))
    return collapsed.strip('.')
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
|
|
|
|
|
|
|
2018-09-21 14:24:31 +01:00
|
|
|
|
def id_safe(string):
    """Make ``string`` safe to use as an HTML id/slug, joining words with hyphens."""
    return email_safe(string, whitespace='-')
|
|
|
|
|
|
|
|
|
|
|
|
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
class Spreadsheet():
    """Wraps an uploaded spreadsheet and normalises it to CSV.

    Accepts CSV directly, and reads other common formats via pyexcel,
    so the rest of the app only ever deals with CSV text.
    """

    allowed_file_extensions = ['csv', 'xlsx', 'xls', 'ods', 'xlsm', 'tsv']

    def __init__(self, csv_data, filename=''):
        self.filename = filename
        self.as_csv_data = csv_data
        # Shape expected by the S3 upload/session code elsewhere in the app.
        self.as_dict = {
            'file_name': filename,
            'data': csv_data,
        }

    @classmethod
    def can_handle(cls, filename):
        """Whether the file's extension is one we know how to read."""
        return cls.get_extension(filename) in cls.allowed_file_extensions

    @staticmethod
    def get_extension(filename):
        """Return the lowercase extension of ``filename`` without the dot."""
        _, extension = path.splitext(filename)
        return extension.lower().lstrip('.')

    @staticmethod
    def normalise_newlines(file_content):
        """Decode an uploaded file object and force CRLF line endings."""
        lines = file_content.read().decode('utf-8').splitlines()
        return '\r\n'.join(lines)

    @classmethod
    def from_rows(cls, rows, filename=''):
        """Build a Spreadsheet from an iterable of row iterables."""
        with StringIO() as converted:
            writer = csv.writer(converted)
            writer.writerows(rows)
            return cls(converted.getvalue(), filename)

    @classmethod
    def from_dict(cls, dictionary, filename=''):
        """Build a two-row Spreadsheet: sorted keys, then their values."""
        columns = sorted(dictionary.items(), key=lambda pair: pair[0])
        return cls.from_rows(zip(*columns), filename)

    @classmethod
    def from_file(cls, file_content, filename=''):
        """Build a Spreadsheet from an uploaded file of any supported type."""
        extension = cls.get_extension(filename)

        if extension == 'csv':
            return cls(Spreadsheet.normalise_newlines(file_content), filename)

        if extension == 'tsv':
            # pyexcel wants a text stream for TSV input.
            file_content = StringIO(
                Spreadsheet.normalise_newlines(file_content))

        instance = cls.from_rows(
            pyexcel.iget_array(
                file_type=extension,
                file_stream=file_content),
            filename)
        # iget_array streams lazily; free its resources once consumed.
        pyexcel.free_resources()
        return instance
|
2016-07-05 11:39:07 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_help_argument():
    """Return the ?help= query argument if it is a valid step ('1'–'3'), else None."""
    value = request.args.get('help')
    if value in ('1', '2', '3'):
        return value
    return None
|
2016-10-25 18:10:15 +01:00
|
|
|
|
|
|
|
|
|
|
|
2016-10-28 10:45:05 +01:00
|
|
|
|
def is_gov_user(email_address):
    """True if the address belongs to a known government email domain."""
    try:
        GovernmentEmailDomain(email_address)
    except NotGovernmentEmailDomain:
        return False
    else:
        return True
|
2016-12-05 11:51:19 +00:00
|
|
|
|
|
|
|
|
|
|
|
2016-12-20 14:38:34 +00:00
|
|
|
|
def get_template(
    template,
    service,
    show_recipient=False,
    expand_emails=False,
    letter_preview_url=None,
    page_count=1,
    redact_missing_personalisation=False,
    email_reply_to=None,
    sms_sender=None,
):
    """Build the right preview-template object for a template dict.

    Dispatches on ``template['template_type']`` ('email', 'sms' or
    'letter') and returns the matching notifications_utils template
    class, configured for this ``service``. Implicitly returns None for
    any other template type.
    """
    if 'email' == template['template_type']:
        return EmailPreviewTemplate(
            template,
            from_name=service.name,
            from_address='{}@notifications.service.gov.uk'.format(service.email_from),
            expanded=expand_emails,
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
            reply_to=email_reply_to,
        )
    if 'sms' == template['template_type']:
        return SMSPreviewTemplate(
            template,
            prefix=service.name,
            show_prefix=service.prefix_sms,
            sender=sms_sender,
            # Only show the sender line when an explicit sender is set.
            show_sender=bool(sms_sender),
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
        )
    if 'letter' == template['template_type']:
        if letter_preview_url:
            # A rendered image of the letter exists; show that image.
            return LetterImageTemplate(
                template,
                image_url=letter_preview_url,
                page_count=int(page_count),
                contact_block=template['reply_to_text']
            )
        else:
            # No rendered image yet; render an HTML preview instead.
            return LetterPreviewTemplate(
                template,
                contact_block=template['reply_to_text'],
                admin_base_url=current_app.config['ADMIN_BASE_URL'],
                redact_missing_personalisation=redact_missing_personalisation,
            )
|
2016-12-28 11:06:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2017-01-25 15:59:06 +00:00
|
|
|
|
def get_current_financial_year():
    """Return the UK financial year (starting 1 April, UTC) containing today.

    The financial year beginning April 2018 is 2018; January–March belong
    to the previous calendar year's financial year.
    """
    now = datetime.utcnow()
    # Use the datetime attributes directly: the previous
    # int(now.strftime('%-m')) relied on the '%-m' glibc extension, which
    # is not portable (it raises on Windows strftime).
    return now.year if now.month > 3 else now.year - 1
|
2017-06-12 17:21:25 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_time_left(created_at):
    """Describe how long notification data created at ``created_at`` remains available.

    Data is kept for 8 days; availability is measured against the end of
    today (UTC).
    """
    end_of_today = datetime.now(timezone.utc).replace(hour=23, minute=59, second=59)
    data_expires_at = dateutil.parser.parse(created_at) + timedelta(days=8)
    return ago.human(
        end_of_today - data_expires_at,
        future_tense='Data available for {}',
        past_tense='Data no longer available',  # No-one should ever see this
        precision=1
    )
|
2017-07-04 17:25:35 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def email_or_sms_not_enabled(template_type, permissions):
    """True if ``template_type`` is email/sms but the service lacks that permission."""
    if template_type not in ['email', 'sms']:
        return False
    return template_type not in permissions
|
2017-07-11 17:06:15 +01:00
|
|
|
|
|
|
|
|
|
|
|
2017-07-24 15:20:40 +01:00
|
|
|
|
def get_cdn_domain():
    """Derive the static-logos CDN hostname from ADMIN_BASE_URL."""
    parsed_uri = urlparse(current_app.config['ADMIN_BASE_URL'])

    # Local development has no real CDN; point at the tools environment.
    if parsed_uri.netloc.startswith('localhost'):
        return 'static-logos.notify.tools'

    # Swap the first label of the host for 'static-logos', keeping any port.
    subdomain = parsed_uri.hostname.split('.')[0]
    remainder = parsed_uri.netloc[len(subdomain) + 1:]
    return "static-logos.{}".format(remainder)
|
2017-12-30 16:54:39 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_filter_args(filter_dict):
    """Normalise filter arguments into a MultiDict of split value lists.

    Each key's values are joined and re-split on commas, so both repeated
    keys and comma-separated values are accepted. Keys whose values are
    all empty are dropped.
    """
    if not isinstance(filter_dict, MultiDict):
        filter_dict = MultiDict(filter_dict)

    parsed = []
    for key in filter_dict.keys():
        raw_values = filter_dict.getlist(key)
        # Skip keys with nothing but empty values.
        if not ''.join(raw_values):
            continue
        parsed.append((key, ','.join(raw_values).split(',')))

    return MultiDict(parsed)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def set_status_filters(filter_args):
    """Expand user-facing status filters into the full list of API statuses.

    'delivered', 'sending' and 'failed' each stand for a whole group of
    underlying statuses; no filter at all means every requested status.
    """
    status_filters = filter_args.get('status', [])

    expanded = list(status_filters or REQUESTED_STATUSES)
    if 'delivered' in status_filters:
        expanded += DELIVERED_STATUSES
    if 'sending' in status_filters:
        expanded += SENDING_STATUSES
    if 'failed' in status_filters:
        expanded += FAILURE_STATUSES

    # De-duplicate while keeping first-seen order.
    return list(OrderedSet(expanded))
|
2018-02-06 09:29:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-02-06 16:55:00 +00:00
|
|
|
|
# Directory containing this module; used below to locate the bundled YAML data files.
_dir_path = os.path.dirname(os.path.realpath(__file__))
|
2018-02-06 09:29:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
2018-03-09 14:53:04 +00:00
|
|
|
|
class AgreementInfo:
    """Agreement/ownership details for a government email domain.

    Class-level data is loaded once, at import time, from domains.yml.
    Each entry maps a domain either to a dict of details or to a string
    naming another (canonical) domain to defer to.
    """

    with open('{}/domains.yml'.format(_dir_path)) as domains:
        domains = yaml.safe_load(domains)

    # Longest domains first, so the most specific match wins.
    domain_names = sorted(domains.keys(), key=len, reverse=True)

    def __init__(self, email_address_or_domain):

        # First known domain matching the address/domain, or None.
        self._match = next(filter(
            self.get_matching_function(email_address_or_domain),
            self.domain_names,
        ), None)

        # The part after '@' (or the whole string when no '@' is present).
        self._domain = email_address_or_domain.split('@')[-1]

        (
            self.owner,
            self.crown_status,
            self.agreement_signed,
            self.canonical_domain,
        ) = self._get_info()

    @classmethod
    def from_user(cls, user):
        """Build from a (possibly anonymous) user's email address."""
        return cls(user.email_address if user.is_authenticated else '')

    @classmethod
    def from_current_user(cls):
        """Build from the currently signed-in user."""
        return cls.from_user(current_user)

    @property
    def as_human_readable(self):
        """One-line summary of the agreement status for display to users."""
        if self.agreement_signed:
            return 'Yes, on behalf of {}'.format(self.owner)
        elif self.owner:
            # agreement_signed here is False or None (unknown).
            return '{} (organisation is {}, {})'.format(
                {
                    False: 'No',
                    None: 'Can’t tell',
                }.get(self.agreement_signed),
                self.owner,
                {
                    True: 'a crown body',
                    False: 'a non-crown body',
                    None: 'crown status unknown',
                }.get(self.crown_status),
            )
        else:
            return 'Can’t tell (domain is {})'.format(self._domain)

    @property
    def as_info_for_branding_request(self):
        """Owner name for branding requests, or a fallback naming the domain."""
        return self.owner or 'Can’t tell (domain is {})'.format(self._domain)

    @property
    def as_jinja_template(self):
        """Name of the agreement page template to render for this domain."""
        if self.crown_status is None:
            return 'agreement-choose'
        if self.agreement_signed:
            return 'agreement-signed'
        return 'agreement'

    def as_terms_of_use_paragraph(self, **kwargs):
        """HTML-safe paragraph about the agreement, for the terms of use page."""
        return Markup(self._as_terms_of_use_paragraph(**kwargs))

    def _as_terms_of_use_paragraph(self, terms_link, download_link, support_link, signed_in):
        # Raw (unescaped) HTML; wrapped in Markup by the public method above.

        if not signed_in:
            return ((
                '{} <a href="{}">Sign in</a> to download a copy '
                'or find out if one is already in place.'
            ).format(self._acceptance_required, terms_link))

        if self.agreement_signed is None:
            # We can't tell whether this organisation has signed.
            return ((
                '{} <a href="{}">Download the agreement</a> or '
                '<a href="{}">contact us</a> to find out if we already '
                'have one in place with your organisation.'
            ).format(self._acceptance_required, download_link, support_link))

        if self.agreement_signed is False:
            return ((
                '{} <a href="{}">Download a copy</a>.'
            ).format(self._acceptance_required, download_link))

        return (
            'Your organisation ({}) has already accepted the '
            'GOV.UK Notify data sharing and financial '
            'agreement.'.format(self.owner)
        )

    def as_pricing_paragraph(self, **kwargs):
        """HTML-safe paragraph about the agreement, for the pricing page."""
        return Markup(self._as_pricing_paragraph(**kwargs))

    def _as_pricing_paragraph(self, pricing_link, download_link, support_link, signed_in):
        # Raw (unescaped) HTML; wrapped in Markup by the public method above.

        if not signed_in:
            return ((
                '<a href="{}">Sign in</a> to download a copy or find '
                'out if one is already in place with your organisation.'
            ).format(pricing_link))

        if self.agreement_signed is None:
            return ((
                '<a href="{}">Download the agreement</a> or '
                '<a href="{}">contact us</a> to find out if we already '
                'have one in place with your organisation.'
            ).format(download_link, support_link))

        return (
            '<a href="{}">Download the agreement</a> '
            '({} {}).'.format(
                download_link,
                self.owner,
                {
                    True: 'has already accepted it',
                    False: 'hasn’t accepted it yet'
                }.get(self.agreement_signed)
            )
        )

    @property
    def _acceptance_required(self):
        # NOTE(review): when owner is falsy this produces a double space
        # ('organisation  must') — confirm whether that is intended.
        return (
            'Your organisation {} must also accept our data sharing '
            'and financial agreement.'.format(
                '({})'.format(self.owner) if self.owner else '',
            )
        )

    @property
    def crown_status_or_404(self):
        """Crown status, aborting with 404 when it can't be determined."""
        if self.crown_status is None:
            abort(404)
        return self.crown_status

    @staticmethod
    def get_matching_function(email_address_or_domain):
        """Return a predicate testing whether a known domain matches the input.

        A domain matches when the (lowercased) input equals it, is an email
        address at it, or is a subdomain of it.
        """

        email_address_or_domain = email_address_or_domain.lower()

        def fn(domain):

            return (
                email_address_or_domain == domain
            ) or (
                email_address_or_domain.endswith("@{}".format(domain))
            ) or (
                email_address_or_domain.endswith(".{}".format(domain))
            )

        return fn

    def _get_info(self):
        """Resolve (owner, crown_status, agreement_signed, canonical_domain).

        String entries in domains.yml are aliases: resolution recurses into
        the named canonical domain, and `is_canonical` records whether this
        instance's own match was the canonical entry.
        """

        details = self.domains.get(self._match) or {}

        if isinstance(details, str):
            # Alias entry: defer to the canonical domain it names.
            self.is_canonical = False
            return AgreementInfo(details)._get_info()

        elif isinstance(details, dict):
            # Empty dict means a known domain with no recorded details.
            self.is_canonical = bool(details)
            return(
                details.get("owner"),
                details.get("crown"),
                details.get("agreement_signed"),
                self._match,
            )
|
|
|
|
|
|
|
2018-02-06 16:55:00 +00:00
|
|
|
|
|
|
|
|
|
|
class NotGovernmentEmailDomain(Exception):
    """Raised when an email address doesn't match any known government domain."""
|
|
|
|
|
|
|
|
|
|
|
|
|
2018-03-09 14:53:04 +00:00
|
|
|
|
class GovernmentEmailDomain(AgreementInfo):
    """AgreementInfo restricted to the list of known government email domains.

    Unlike the parent class, construction raises NotGovernmentEmailDomain
    when nothing matches, instead of falling back to no match.
    """

    # NOTE(review): assumes email_domains.yml is a YAML list of domain
    # strings — confirm against the data file.
    with open('{}/email_domains.yml'.format(_dir_path)) as email_domains:
        domain_names = yaml.safe_load(email_domains)

    def __init__(self, email_address_or_domain):
        matches = filter(
            self.get_matching_function(email_address_or_domain),
            self.domain_names,
        )
        try:
            self._match = next(matches)
        except StopIteration:
            raise NotGovernmentEmailDomain()
|
2018-04-30 10:46:39 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def unicode_truncate(s, length):
    """Truncate ``s`` to at most ``length`` UTF-8 bytes.

    Any multi-byte character cut in half at the boundary is dropped
    rather than producing invalid text.
    """
    truncated_bytes = s.encode('utf-8')[:length]
    return truncated_bytes.decode('utf-8', 'ignore')
|
2018-07-02 09:08:21 +01:00
|
|
|
|
|
|
|
|
|
|
|
2018-07-11 13:31:38 +01:00
|
|
|
|
def starts_with_initial(name):
    """True if ``name`` begins with a single character followed by a dot (an initial)."""
    return re.match(r'^.\.', name) is not None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def remove_middle_initial(name):
    """Collapse a single character between spaces (a middle initial) into one space."""
    middle_initial = re.compile(r'\s+.\s+')
    return middle_initial.sub(' ', name)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def remove_digits(name):
    """Strip every digit character (Unicode-aware, via str.isdigit) from ``name``."""
    return ''.join(filter(lambda character: not character.isdigit(), name))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def normalize_spaces(name):
    """Collapse runs of whitespace to single spaces and trim the ends."""
    words = name.split()
    return ' '.join(words)
|
|
|
|
|
|
|
|
|
|
|
|
|
2018-07-02 09:08:21 +01:00
|
|
|
|
def guess_name_from_email_address(email_address):
    """Guess a display name from a 'first.last@example.com' style address.

    Returns '' when the local part has no dot-separated words, or looks
    like it starts with an initial (e.g. 'j.smith@…'), since no useful
    name can be inferred. Otherwise returns the cleaned-up, title-cased
    name (as a Take, which behaves like a string).
    """

    # Local part only, also cutting off any '+tag' suffix.
    possible_name = re.split(r'[\@\+]', email_address)[0]

    if '.' not in possible_name or starts_with_initial(possible_name):
        return ''

    # Pipeline: dots → spaces, drop digits and middle initials, title-case,
    # smarten quotes (O'Brien → O’Brien), then tidy the whitespace.
    return Take(
        possible_name
    ).then(
        str.replace, '.', ' '
    ).then(
        remove_digits
    ).then(
        remove_middle_initial
    ).then(
        str.title
    ).then(
        make_quotes_smart
    ).then(
        normalize_spaces
    )
|
2018-08-09 16:29:51 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def should_skip_template_page(template_type):
    """Whether to jump straight past the template page for send-only users.

    Users who can send messages but can't manage templates or API keys
    have nothing to do on the template page — except for letters, which
    always show it.
    """
    is_send_only_user = (
        current_user.has_permissions('send_messages') and
        not current_user.has_permissions('manage_templates', 'manage_api_keys')
    )
    return is_send_only_user and template_type != 'letter'
|
2018-08-23 16:11:08 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_default_sms_sender(sms_senders):
    """Return the default sender's value (HTML-escaped) or 'None' if there isn't one."""
    default_senders = (
        Field(sender['sms_sender'], html='escape')
        for sender in sms_senders
        if sender['is_default']
    )
    return str(next(default_senders, "None"))
|