import csv
import os
import re
import unicodedata
from datetime import datetime, time, timedelta, timezone
from functools import wraps
from io import BytesIO, StringIO
from itertools import chain
from os import path
from urllib.parse import urlparse

import ago
import dateutil.parser  # 'import dateutil' alone does not load the parser submodule
import pyexcel
import pyexcel_xlsx
import yaml
from flask import abort, current_app, redirect, request, session, url_for
from flask_login import current_user
from notifications_utils.field import Field
from notifications_utils.formatters import make_quotes_smart
from notifications_utils.recipients import RecipientCSV
from notifications_utils.take import Take
from notifications_utils.template import (
    EmailPreviewTemplate,
    LetterImageTemplate,
    LetterPreviewTemplate,
    SMSPreviewTemplate,
)
from notifications_utils.timezones import convert_utc_to_bst
from orderedset import OrderedSet  # import from the public package, not the private _orderedset module
from werkzeug.datastructures import MultiDict
from werkzeug.routing import RequestRedirect

from app.notify_client.organisations_api_client import organisations_client

SENDING_STATUSES = ['created', 'pending', 'sending', 'pending-virus-check']
DELIVERED_STATUSES = ['delivered', 'sent', 'returned-letter']
FAILURE_STATUSES = ['failed', 'temporary-failure', 'permanent-failure',
                    'technical-failure', 'virus-scan-failed', 'validation-failed']
REQUESTED_STATUSES = SENDING_STATUSES + DELIVERED_STATUSES + FAILURE_STATUSES
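
# Domain names (loaded from email_domains.yml) that mark an email address as
# belonging to government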
with open('{}/email_domains.yml'.format(
    os.path.dirname(os.path.realpath(__file__))
)) as email_domains:
    GOVERNMENT_EMAIL_DOMAIN_NAMES = yaml.safe_load(email_domains)


def user_has_permissions(*permissions, **permission_kwargs):
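    """
    Decorator for view functions: aborts with 401 if no user is signed in,
    or with 403 if the signed-in user lacks the given permissions.
    """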
    def wrap(func):
        @wraps(func)
        def wrap_func(*args, **kwargs):
            if current_user and current_user.is_authenticated:
                if current_user.has_permissions(
                    *permissions,
                    **permission_kwargs
                ):
                    return func(*args, **kwargs)
                else:
                    abort(403)
            else:
                abort(401)
        return wrap_func
    return wrap


def user_is_gov_user(f):
    @wraps(f)
    def wrapped(*args, **kwargs):
        if not current_user.is_gov_user:
            abort(403)
        return f(*args, **kwargs)
    return wrapped


def user_is_platform_admin(f):
    @wraps(f)
    def wrapped(*args, **kwargs):
        if not current_user.is_authenticated:
            abort(401)
        if not current_user.platform_admin:
            abort(403)
        return f(*args, **kwargs)
    return wrapped


def redirect_to_sign_in(f):
    @wraps(f)
    def wrapped(*args, **kwargs):
        if 'user_details' not in session:
            return redirect(url_for('main.sign_in'))
        else:
            return f(*args, **kwargs)
    return wrapped


def get_errors_for_csv(recipients, template_type):
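    """
    Summarise the problems in an uploaded RecipientCSV as a list of
    human-readable error messages, worded for the given template type.
    """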
    errors = []

    if any(recipients.rows_with_bad_recipients):
        number_of_bad_recipients = len(list(recipients.rows_with_bad_recipients))
        if 'sms' == template_type:
            if 1 == number_of_bad_recipients:
                errors.append("fix 1 phone number")
            else:
                errors.append("fix {} phone numbers".format(number_of_bad_recipients))
        elif 'email' == template_type:
            if 1 == number_of_bad_recipients:
                errors.append("fix 1 email address")
            else:
                errors.append("fix {} email addresses".format(number_of_bad_recipients))
        elif 'letter' == template_type:
            if 1 == number_of_bad_recipients:
                errors.append("fix 1 address")
            else:
                errors.append("fix {} addresses".format(number_of_bad_recipients))

    if any(recipients.rows_with_missing_data):
        number_of_rows_with_missing_data = len(list(recipients.rows_with_missing_data))
        if 1 == number_of_rows_with_missing_data:
            errors.append("enter missing data in 1 row")
        else:
            errors.append("enter missing data in {} rows".format(number_of_rows_with_missing_data))

    return errors


def generate_notifications_csv(**kwargs):
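    """
    Yield a CSV report of a service's notifications: the header row first,
    then one line per notification, fetching a page of notifications from
    the API at a time. If a job_id is given, the columns of the originally
    uploaded spreadsheet are included alongside each notification.
    """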
    from app import notification_api_client
    from app.s3_client.s3_csv_client import s3download
    if 'page' not in kwargs:
        kwargs['page'] = 1

    if kwargs.get('job_id'):
        original_file_contents = s3download(kwargs['service_id'], kwargs['job_id'])
        original_upload = RecipientCSV(
            original_file_contents,
            template_type=kwargs['template_type'],
        )
        original_column_headers = original_upload.column_headers
        fieldnames = ['Row number'] + original_column_headers + ['Template', 'Type', 'Job', 'Status', 'Time']
    else:
        fieldnames = ['Recipient', 'Template', 'Type', 'Sent by', 'Sent by email', 'Job', 'Status', 'Time']

    yield ','.join(fieldnames) + '\n'

    while kwargs['page']:
        notifications_resp = notification_api_client.get_notifications_for_service(**kwargs)
        for notification in notifications_resp['notifications']:
            if kwargs.get('job_id'):
                values = [
                    notification['row_number'],
                ] + [
                    original_upload[notification['row_number'] - 1].get(header).data
                    for header in original_column_headers
                ] + [
                    notification['template_name'],
                    notification['template_type'],
                    notification['job_name'],
                    notification['status'],
                    notification['created_at'],
                ]
            else:
                values = [
                    notification['recipient'],
                    notification['template_name'],
                    notification['template_type'],
                    notification['created_by_name'] or '',
                    notification['created_by_email_address'] or '',
                    notification['job_name'] or '',
                    notification['status'],
                    notification['created_at']
                ]
            yield Spreadsheet.from_rows([map(str, values)]).as_csv_data

        if notifications_resp['links'].get('next'):
            kwargs['page'] += 1
        else:
            return
    raise Exception("Should never reach here")


def get_page_from_request():
    if 'page' in request.args:
        try:
            return int(request.args['page'])
        except ValueError:
            return None
    else:
        return 1


def generate_previous_dict(view, service_id, page, url_args=None):
    return generate_previous_next_dict(view, service_id, page - 1, 'Previous page', url_args or {})


def generate_next_dict(view, service_id, page, url_args=None):
    return generate_previous_next_dict(view, service_id, page + 1, 'Next page', url_args or {})


def generate_previous_next_dict(view, service_id, page, title, url_args):
    return {
        'url': url_for(view, service_id=service_id, page=page, **url_args),
        'title': title,
        'label': 'page {}'.format(page)
    }


def email_safe(string, whitespace='.'):
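    """
    Make a string safe for use in an email address: strip accents,
    lowercase, and replace runs of whitespace with the given separator.
    """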
|
2016-10-27 17:31:13 +01:00
|
|
|
# strips accents, diacritics etc
|
|
|
|
|
string = ''.join(c for c in unicodedata.normalize('NFD', string) if unicodedata.category(c) != 'Mn')
|
|
|
|
|
string = ''.join(
|
|
|
|
|
word.lower() if word.isalnum() or word == whitespace else ''
|
|
|
|
|
for word in re.sub(r'\s+', whitespace, string.strip())
|
|
|
|
|
)
|
|
|
|
|
string = re.sub(r'\.{2,}', '.', string)
|
|
|
|
|
return string.strip('.')


def id_safe(string):
    return email_safe(string, whitespace='-')


class Spreadsheet():
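    """
    Wrapper for uploaded spreadsheets which normalises them to CSV data.
    Accepts common spreadsheet formats, not just CSV: anything that isn't
    already CSV is read with pyexcel.
    """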

    allowed_file_extensions = ['csv', 'xlsx', 'xls', 'ods', 'xlsm', 'tsv']

    def __init__(self, csv_data=None, rows=None, filename=''):
        self.filename = filename

        if csv_data and rows:
            raise TypeError('Spreadsheet must be created from either rows or CSV data')

        self._csv_data = csv_data or ''
        self._rows = rows or []

    @property
    def as_dict(self):
        return {
            'file_name': self.filename,
            'data': self.as_csv_data
        }

    @property
    def as_csv_data(self):
        if not self._csv_data:
            with StringIO() as converted:
                output = csv.writer(converted)
                for row in self._rows:
                    output.writerow(row)
                self._csv_data = converted.getvalue()
        return self._csv_data

    @classmethod
    def can_handle(cls, filename):
        return cls.get_extension(filename) in cls.allowed_file_extensions

    @staticmethod
    def get_extension(filename):
        return path.splitext(filename)[1].lower().lstrip('.')

    @staticmethod
    def normalise_newlines(file_content):
        return '\r\n'.join(file_content.read().decode('utf-8').splitlines())

    @classmethod
    def from_rows(cls, rows, filename=''):
        return cls(rows=rows, filename=filename)

    @classmethod
    def from_dict(cls, dictionary, filename=''):
        return cls.from_rows(
            zip(
                *sorted(dictionary.items(), key=lambda pair: pair[0])
            ),
            filename=filename,
        )

    @classmethod
    def from_file(cls, file_content, filename=''):
        extension = cls.get_extension(filename)

        if extension == 'csv':
            return cls(csv_data=Spreadsheet.normalise_newlines(file_content), filename=filename)

        if extension == 'tsv':
            file_content = StringIO(
                Spreadsheet.normalise_newlines(file_content))

        instance = cls.from_rows(
            pyexcel.iget_array(
                file_type=extension,
                file_stream=file_content),
            filename)
        pyexcel.free_resources()
        return instance

    @property
    def as_rows(self):
        if not self._rows:
            self._rows = list(csv.reader(
                self._csv_data.strip().splitlines(),
                quoting=csv.QUOTE_MINIMAL,
                skipinitialspace=True,
            ))
        return self._rows

    @property
    def as_excel_file(self):
        io = BytesIO()
        pyexcel_xlsx.save_data(io, {'Sheet 1': self.as_rows})
        return io.getvalue()


def get_help_argument():
    return request.args.get('help') if request.args.get('help') in ('1', '2', '3') else None


def email_address_ends_with(email_address, known_domains):
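    """
    Whether the email address is on one of the known domains, or on a
    subdomain of one of them.
    """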
    return any(
        email_address.lower().endswith((
            "@{}".format(known),
            ".{}".format(known),
        ))
        for known in known_domains
    )


def is_gov_user(email_address):
    return email_address_ends_with(
        email_address, GOVERNMENT_EMAIL_DOMAIN_NAMES
    ) or email_address_ends_with(
        email_address, organisations_client.get_domains()
    )


def get_template(
    template,
    service,
    show_recipient=False,
    expand_emails=False,
    letter_preview_url=None,
    page_count=1,
    redact_missing_personalisation=False,
    email_reply_to=None,
    sms_sender=None,
):
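    """
    Return the preview template object (email, SMS or letter) that matches
    the given template dict, configured for the given service.
    """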
    if 'email' == template['template_type']:
        return EmailPreviewTemplate(
            template,
            from_name=service.name,
            from_address='{}@notifications.service.gov.uk'.format(service.email_from),
            expanded=expand_emails,
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
            reply_to=email_reply_to,
        )
    if 'sms' == template['template_type']:
        return SMSPreviewTemplate(
            template,
            prefix=service.name,
            show_prefix=service.prefix_sms,
            sender=sms_sender,
            show_sender=bool(sms_sender),
            show_recipient=show_recipient,
            redact_missing_personalisation=redact_missing_personalisation,
        )
    if 'letter' == template['template_type']:
        if letter_preview_url:
            return LetterImageTemplate(
                template,
                image_url=letter_preview_url,
                page_count=int(page_count),
                contact_block=template['reply_to_text'],
                postage=template['postage'],
            )
        else:
            return LetterPreviewTemplate(
                template,
                contact_block=template['reply_to_text'],
                admin_base_url=current_app.config['ADMIN_BASE_URL'],
                redact_missing_personalisation=redact_missing_personalisation,
            )


def get_current_financial_year():
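    """
    Return the calendar year in which the current UK financial year
    (which starts on 1 April) began.
    """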
    now = datetime.utcnow()
    current_month = now.month
    current_year = now.year
    return current_year if current_month > 3 else current_year - 1
|
2017-06-12 17:21:25 +01:00
|
|
|
|
|
|
|
|
|
2018-11-26 15:15:06 +00:00
|
|
|
def get_time_left(created_at, service_data_retention_days=7):
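    """
    Return a human-readable description of how much longer the data for
    something created at `created_at` will be kept, given the service's
    data retention period.
    """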
|
2017-06-12 17:21:25 +01:00
|
|
|
return ago.human(
|
|
|
|
|
(
|
2018-11-26 15:15:06 +00:00
|
|
|
datetime.now(timezone.utc)
|
2017-06-12 17:21:25 +01:00
|
|
|
) - (
|
2018-11-26 15:15:06 +00:00
|
|
|
dateutil.parser.parse(created_at).replace(hour=0, minute=0, second=0) + timedelta(
|
|
|
|
|
days=service_data_retention_days + 1
|
|
|
|
|
)
|
2017-06-12 17:21:25 +01:00
|
|
|
),
|
|
|
|
|
future_tense='Data available for {}',
|
|
|
|
|
past_tense='Data no longer available', # No-one should ever see this
|
|
|
|
|
precision=1
|
|
|
|
|
)
|
2017-07-04 17:25:35 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def email_or_sms_not_enabled(template_type, permissions):
|
|
|
|
|
return (template_type in ['email', 'sms']) and (template_type not in permissions)


def get_logo_cdn_domain():
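    """
    Derive the static-logos CDN domain from the environment's
    ADMIN_BASE_URL, with a fixed fallback when running on localhost.
    """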
    parsed_uri = urlparse(current_app.config['ADMIN_BASE_URL'])

    if parsed_uri.netloc.startswith('localhost'):
        return 'static-logos.notify.tools'

    subdomain = parsed_uri.hostname.split('.')[0]
    domain = parsed_uri.netloc[len(subdomain + '.'):]

    return "static-logos.{}".format(domain)


def parse_filter_args(filter_dict):
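    """
    Normalise filter arguments into a MultiDict, splitting comma-separated
    values and dropping keys whose values are all empty.
    """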
    if not isinstance(filter_dict, MultiDict):
        filter_dict = MultiDict(filter_dict)

    return MultiDict(
        (
            key,
            (','.join(filter_dict.getlist(key))).split(',')
        )
        for key in filter_dict.keys()
        if ''.join(filter_dict.getlist(key))
    )


def set_status_filters(filter_args):
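    """
    Expand the high-level status filters ('delivered', 'sending', 'failed')
    into the full lists of underlying notification statuses, de-duplicated
    and in a stable order.
    """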
    status_filters = filter_args.get('status', [])
    return list(OrderedSet(chain(
        (status_filters or REQUESTED_STATUSES),
        DELIVERED_STATUSES if 'delivered' in status_filters else [],
        SENDING_STATUSES if 'sending' in status_filters else [],
        FAILURE_STATUSES if 'failed' in status_filters else []
    )))


def unicode_truncate(s, length):
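    """
    Truncate a string to at most `length` bytes of UTF-8, discarding any
    multi-byte character that the cut would split.
    """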
    encoded = s.encode('utf-8')[:length]
    return encoded.decode('utf-8', 'ignore')
|
2018-07-02 09:08:21 +01:00
|
|
|
|
|
|
|
|
|
2018-07-11 13:31:38 +01:00
|
|
|
def starts_with_initial(name):
|
|
|
|
|
return bool(re.match(r'^.\.', name))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def remove_middle_initial(name):
|
|
|
|
|
return re.sub(r'\s+.\s+', ' ', name)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def remove_digits(name):
|
|
|
|
|
return ''.join(c for c in name if not c.isdigit())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def normalize_spaces(name):
|
|
|
|
|
return ' '.join(name.split())


def guess_name_from_email_address(email_address):
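    """
    Guess a person's name from the local part of their email address, so
    'first.last@example.com' becomes 'First Last'. Returns an empty string
    if the local part doesn't look like a full name.
    """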
    possible_name = re.split(r'[@+]', email_address)[0]

    if '.' not in possible_name or starts_with_initial(possible_name):
        return ''

    return Take(
        possible_name
    ).then(
        str.replace, '.', ' '
    ).then(
        remove_digits
    ).then(
        remove_middle_initial
    ).then(
        str.title
    ).then(
        make_quotes_smart
    ).then(
        normalize_spaces
    )
|
2018-08-09 16:29:51 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def should_skip_template_page(template_type):
|
|
|
|
|
return (
|
2019-02-21 13:03:06 +00:00
|
|
|
current_user.has_permissions('send_messages')
|
|
|
|
|
and not current_user.has_permissions('manage_templates', 'manage_api_keys')
|
|
|
|
|
and template_type != 'letter'
|
2018-08-09 16:29:51 +01:00
|
|
|
)


def get_default_sms_sender(sms_senders):
    return str(next((
        Field(x['sms_sender'], html='escape')
        for x in sms_senders if x['is_default']
    ), "None"))
|
2018-11-27 16:49:01 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def printing_today_or_tomorrow():
|
|
|
|
|
now_utc = datetime.utcnow()
|
|
|
|
|
now_bst = convert_utc_to_bst(now_utc)
|
|
|
|
|
|
|
|
|
|
if now_bst.time() < time(17, 30):
|
|
|
|
|
return 'today'
|
|
|
|
|
else:
|
|
|
|
|
return 'tomorrow'
|
2019-02-21 13:03:06 +00:00
|
|
|
|
|
|
|
|
|
2019-02-25 14:27:37 +00:00
|
|
|
def redact_mobile_number(mobile_number, spacing=""):
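    """
    Mask the four digits before the last three digits of a mobile number
    with '•' characters, so '07700900123' becomes '0770••••123'.
    """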
|
2019-02-21 13:03:06 +00:00
|
|
|
indices = [-4, -5, -6, -7]
|
2019-02-25 14:27:37 +00:00
|
|
|
redact_character = spacing + "•" + spacing
|
2019-02-21 13:03:06 +00:00
|
|
|
mobile_number_list = list(mobile_number.replace(" ", ""))
|
|
|
|
|
for i in indices:
|
2019-02-25 14:27:37 +00:00
|
|
|
mobile_number_list[i] = redact_character
|
2019-02-21 13:03:06 +00:00
|
|
|
return "".join(mobile_number_list)
|
2019-03-21 16:41:22 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class PermanentRedirect(RequestRedirect):
|
|
|
|
|
"""
|
|
|
|
|
In Werkzeug 0.15.0 the status code for RequestRedirect changed from 301 to 308.
|
|
|
|
|
308 status codes are not supported when Internet Explorer is used with Windows 7
|
|
|
|
|
and Windows 8.1, so this class keeps the original status code of 301.
|
|
|
|
|
"""
|
|
|
|
|
code = 301
|