2016-02-22 17:17:18 +00:00
|
|
|
import re
|
2016-04-12 14:19:51 +01:00
|
|
|
import csv
|
2017-07-11 17:06:15 +01:00
|
|
|
import pytz
|
2017-06-12 17:21:25 +01:00
|
|
|
from io import StringIO
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
from os import path
|
2016-02-19 16:38:04 +00:00
|
|
|
from functools import wraps
|
2016-10-27 17:31:13 +01:00
|
|
|
import unicodedata
|
2017-07-24 15:20:40 +01:00
|
|
|
from urllib.parse import urlparse
|
2017-07-14 11:13:37 +01:00
|
|
|
from collections import namedtuple
|
2017-06-12 17:21:25 +01:00
|
|
|
from datetime import datetime, timedelta, timezone
|
2016-10-27 17:31:13 +01:00
|
|
|
|
2017-06-12 17:21:25 +01:00
|
|
|
import dateutil
|
|
|
|
|
import ago
|
2017-01-06 17:41:24 +00:00
|
|
|
from flask import (
|
|
|
|
|
abort,
|
|
|
|
|
current_app,
|
|
|
|
|
redirect,
|
|
|
|
|
request,
|
|
|
|
|
session,
|
|
|
|
|
url_for
|
|
|
|
|
)
|
2016-10-25 18:10:15 +01:00
|
|
|
from flask_login import current_user
|
2017-06-12 17:21:25 +01:00
|
|
|
import pyexcel
|
2016-10-27 17:31:13 +01:00
|
|
|
|
2016-12-08 11:50:59 +00:00
|
|
|
from notifications_utils.template import (
|
|
|
|
|
SMSPreviewTemplate,
|
|
|
|
|
EmailPreviewTemplate,
|
2017-04-28 16:04:52 +01:00
|
|
|
LetterImageTemplate,
|
2016-12-20 14:38:34 +00:00
|
|
|
LetterPreviewTemplate,
|
2016-12-08 11:50:59 +00:00
|
|
|
)
|
2016-12-05 11:51:19 +00:00
|
|
|
|
2016-02-19 16:38:04 +00:00
|
|
|
|
2017-01-30 17:27:09 +00:00
|
|
|
SENDING_STATUSES = ['created', 'pending', 'sending']
|
2017-04-27 16:02:49 +01:00
|
|
|
DELIVERED_STATUSES = ['delivered', 'sent']
|
2017-01-30 17:27:09 +00:00
|
|
|
FAILURE_STATUSES = ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure']
|
|
|
|
|
REQUESTED_STATUSES = SENDING_STATUSES + DELIVERED_STATUSES + FAILURE_STATUSES
|
|
|
|
|
|
|
|
|
|
|
2016-01-18 11:15:14 +00:00
|
|
|
class BrowsableItem(object):
|
|
|
|
|
"""
|
|
|
|
|
Maps for the template browse-list.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
def __init__(self, item, *args, **kwargs):
|
|
|
|
|
self._item = item
|
|
|
|
|
super(BrowsableItem, self).__init__()
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def title(self):
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def link(self):
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def hint(self):
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def destructive(self):
|
|
|
|
|
pass
|
2016-02-01 16:57:40 +00:00
|
|
|
|
|
|
|
|
|
2016-03-30 11:30:18 +01:00
|
|
|
def user_has_permissions(*permissions, admin_override=False, any_=False):
|
2016-02-19 16:38:04 +00:00
|
|
|
def wrap(func):
|
|
|
|
|
@wraps(func)
|
|
|
|
|
def wrap_func(*args, **kwargs):
|
2016-10-21 14:24:21 +01:00
|
|
|
if current_user and current_user.is_authenticated:
|
|
|
|
|
if current_user.has_permissions(
|
|
|
|
|
permissions=permissions,
|
|
|
|
|
admin_override=admin_override,
|
|
|
|
|
any_=any_
|
|
|
|
|
):
|
|
|
|
|
return func(*args, **kwargs)
|
|
|
|
|
else:
|
|
|
|
|
abort(403)
|
2016-02-29 14:57:07 +00:00
|
|
|
else:
|
2016-10-21 14:24:21 +01:00
|
|
|
abort(401)
|
2016-02-19 16:38:04 +00:00
|
|
|
return wrap_func
|
|
|
|
|
return wrap
|
2016-03-07 18:47:05 +00:00
|
|
|
|
|
|
|
|
|
2016-06-17 11:36:30 +01:00
|
|
|
def redirect_to_sign_in(f):
|
|
|
|
|
@wraps(f)
|
|
|
|
|
def wrapped(*args, **kwargs):
|
|
|
|
|
if 'user_details' not in session:
|
|
|
|
|
return redirect(url_for('main.sign_in'))
|
|
|
|
|
else:
|
|
|
|
|
return f(*args, **kwargs)
|
|
|
|
|
return wrapped
|
|
|
|
|
|
|
|
|
|
|
2016-03-07 18:47:05 +00:00
|
|
|
def get_errors_for_csv(recipients, template_type):
|
|
|
|
|
|
|
|
|
|
errors = []
|
|
|
|
|
|
|
|
|
|
if recipients.rows_with_bad_recipients:
|
|
|
|
|
number_of_bad_recipients = len(list(recipients.rows_with_bad_recipients))
|
|
|
|
|
if 'sms' == template_type:
|
|
|
|
|
if 1 == number_of_bad_recipients:
|
|
|
|
|
errors.append("fix 1 phone number")
|
|
|
|
|
else:
|
|
|
|
|
errors.append("fix {} phone numbers".format(number_of_bad_recipients))
|
|
|
|
|
elif 'email' == template_type:
|
|
|
|
|
if 1 == number_of_bad_recipients:
|
|
|
|
|
errors.append("fix 1 email address")
|
|
|
|
|
else:
|
|
|
|
|
errors.append("fix {} email addresses".format(number_of_bad_recipients))
|
2016-11-10 14:10:39 +00:00
|
|
|
elif 'letter' == template_type:
|
|
|
|
|
if 1 == number_of_bad_recipients:
|
|
|
|
|
errors.append("fix 1 address")
|
|
|
|
|
else:
|
|
|
|
|
errors.append("fix {} addresses".format(number_of_bad_recipients))
|
2016-03-07 18:47:05 +00:00
|
|
|
|
|
|
|
|
if recipients.rows_with_missing_data:
|
|
|
|
|
number_of_rows_with_missing_data = len(list(recipients.rows_with_missing_data))
|
|
|
|
|
if 1 == number_of_rows_with_missing_data:
|
2016-04-18 11:27:23 +01:00
|
|
|
errors.append("enter missing data in 1 row")
|
2016-03-07 18:47:05 +00:00
|
|
|
else:
|
2016-04-18 11:27:23 +01:00
|
|
|
errors.append("enter missing data in {} rows".format(number_of_rows_with_missing_data))
|
2016-03-07 18:47:05 +00:00
|
|
|
|
|
|
|
|
return errors
|
2016-03-16 16:57:10 +00:00
|
|
|
|
|
|
|
|
|
2017-01-13 11:35:27 +00:00
|
|
|
def generate_notifications_csv(**kwargs):
|
|
|
|
|
from app import notification_api_client
|
|
|
|
|
|
|
|
|
|
if 'page' not in kwargs:
|
|
|
|
|
kwargs['page'] = 1
|
2017-04-20 14:55:14 +01:00
|
|
|
fieldnames = ['Row number', 'Recipient', 'Template', 'Type', 'Job', 'Status', 'Time']
|
|
|
|
|
yield ','.join(fieldnames) + '\n'
|
2017-01-13 11:35:27 +00:00
|
|
|
|
|
|
|
|
while kwargs['page']:
|
|
|
|
|
notifications_resp = notification_api_client.get_notifications_for_service(**kwargs)
|
2017-04-20 14:55:14 +01:00
|
|
|
notifications = notifications_resp['notifications']
|
|
|
|
|
for notification in notifications:
|
|
|
|
|
values = [
|
|
|
|
|
notification['row_number'],
|
|
|
|
|
notification['recipient'],
|
|
|
|
|
notification['template_name'],
|
|
|
|
|
notification['template_type'],
|
|
|
|
|
notification['job_name'],
|
|
|
|
|
notification['status'],
|
|
|
|
|
notification['created_at']
|
|
|
|
|
]
|
2017-06-12 17:21:25 +01:00
|
|
|
line = ','.join(str(i) for i in values) + '\n'
|
2017-01-13 11:35:27 +00:00
|
|
|
yield line
|
|
|
|
|
|
|
|
|
|
if notifications_resp['links'].get('next'):
|
|
|
|
|
kwargs['page'] += 1
|
|
|
|
|
else:
|
|
|
|
|
return
|
2017-06-12 17:21:25 +01:00
|
|
|
raise Exception("Should never reach here")
|
|
|
|
|
|
|
|
|
|
|
2016-03-16 16:57:10 +00:00
|
|
|
def get_page_from_request():
|
|
|
|
|
if 'page' in request.args:
|
|
|
|
|
try:
|
|
|
|
|
return int(request.args['page'])
|
|
|
|
|
except ValueError:
|
|
|
|
|
return None
|
|
|
|
|
else:
|
|
|
|
|
return 1
|
|
|
|
|
|
|
|
|
|
|
2016-10-10 14:50:49 +01:00
|
|
|
def generate_previous_dict(view, service_id, page, url_args=None):
|
2016-10-10 17:15:57 +01:00
|
|
|
return generate_previous_next_dict(view, service_id, page - 1, 'Previous page', url_args or {})
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_next_dict(view, service_id, page, url_args=None):
|
2016-10-10 17:15:57 +01:00
|
|
|
return generate_previous_next_dict(view, service_id, page + 1, 'Next page', url_args or {})
|
2016-10-10 14:50:49 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_previous_next_dict(view, service_id, page, title, url_args):
|
2016-03-16 16:57:10 +00:00
|
|
|
return {
|
2016-10-10 14:50:49 +01:00
|
|
|
'url': url_for(view, service_id=service_id, page=page, **url_args),
|
2016-03-16 16:57:10 +00:00
|
|
|
'title': title,
|
2016-10-10 14:50:49 +01:00
|
|
|
'label': 'page {}'.format(page)
|
2016-03-16 16:57:10 +00:00
|
|
|
}
|
2016-03-30 17:12:00 +01:00
|
|
|
|
|
|
|
|
|
2016-10-07 10:59:32 +01:00
|
|
|
def email_safe(string, whitespace='.'):
|
2016-10-27 17:31:13 +01:00
|
|
|
# strips accents, diacritics etc
|
|
|
|
|
string = ''.join(c for c in unicodedata.normalize('NFD', string) if unicodedata.category(c) != 'Mn')
|
|
|
|
|
string = ''.join(
|
|
|
|
|
word.lower() if word.isalnum() or word == whitespace else ''
|
|
|
|
|
for word in re.sub(r'\s+', whitespace, string.strip())
|
|
|
|
|
)
|
|
|
|
|
string = re.sub(r'\.{2,}', '.', string)
|
|
|
|
|
return string.strip('.')
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class Spreadsheet():
|
|
|
|
|
|
|
|
|
|
allowed_file_extensions = ['csv', 'xlsx', 'xls', 'ods', 'xlsm', 'tsv']
|
|
|
|
|
|
2016-05-15 10:47:52 +01:00
|
|
|
def __init__(self, csv_data, filename=''):
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
self.filename = filename
|
|
|
|
|
self.as_csv_data = csv_data
|
|
|
|
|
self.as_dict = {
|
|
|
|
|
'file_name': self.filename,
|
|
|
|
|
'data': self.as_csv_data
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
|
def can_handle(cls, filename):
|
|
|
|
|
return cls.get_extension(filename) in cls.allowed_file_extensions
|
|
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
|
def get_extension(filename):
|
|
|
|
|
return path.splitext(filename)[1].lower().lstrip('.')
|
|
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
|
def normalise_newlines(file_content):
|
2016-07-07 11:52:57 +01:00
|
|
|
return '\r\n'.join(file_content.read().decode('utf-8').splitlines())
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
|
|
|
|
@classmethod
|
2016-05-15 10:47:52 +01:00
|
|
|
def from_rows(cls, rows, filename=''):
|
|
|
|
|
with StringIO() as converted:
|
|
|
|
|
output = csv.writer(converted)
|
|
|
|
|
|
|
|
|
|
for row in rows:
|
|
|
|
|
output.writerow(row)
|
|
|
|
|
return cls(converted.getvalue(), filename)
|
|
|
|
|
|
2017-05-04 09:30:55 +01:00
|
|
|
@classmethod
|
|
|
|
|
def from_dict(cls, dictionary, filename=''):
|
|
|
|
|
return cls.from_rows(
|
|
|
|
|
zip(
|
|
|
|
|
*sorted(dictionary.items(), key=lambda pair: pair[0])
|
|
|
|
|
),
|
|
|
|
|
filename
|
|
|
|
|
)
|
|
|
|
|
|
2016-05-15 10:47:52 +01:00
|
|
|
@classmethod
|
|
|
|
|
def from_file(cls, file_content, filename=''):
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
extension = cls.get_extension(filename)
|
|
|
|
|
|
|
|
|
|
if extension == 'csv':
|
2016-05-15 10:47:52 +01:00
|
|
|
return cls(Spreadsheet.normalise_newlines(file_content), filename)
|
Accept common spreadsheet formats, not just CSV
We require users to export their spreadsheets as CSV files before
uploading them. But this seems like the sort of thing a computer should
be able to do.
So this commit adds a wrapper class which:
- takes a the uploaded file
- returns it in a normalised format, or reads it using pyexcel[1]
- gives the data back in CSV format
This allows us to accept `.csv`, `.xlsx`, `.xls` (97 and 95), `.ods`,
`.xlsm` and `.tsv` files. We can upload the resultant CSV just like
normal, and process it for errors as before.
Testing
---
To test this I’ve added a selection of common spreadsheet files as test
data. They all contain the same data, so the tests look to see that the
resultant CSV output is the same for each.
UI changes
---
This commit doesn’t change the UI, apart from to give a different error
message if a user uploads a file type that we still don’t understand.
I intend to do this as a separate pull request, in order to fulfil
https://www.pivotaltracker.com/story/show/119371637
2016-05-05 15:41:11 +01:00
|
|
|
|
|
|
|
|
if extension == 'tsv':
|
2017-08-14 22:18:09 +01:00
|
|
|
file_content = StringIO(
|
|
|
|
|
Spreadsheet.normalise_newlines(file_content))
|
|
|
|
|
|
|
|
|
|
instance = cls.from_rows(
|
|
|
|
|
pyexcel.iget_array(
|
|
|
|
|
file_type=extension,
|
|
|
|
|
file_stream=file_content),
|
|
|
|
|
filename)
|
|
|
|
|
pyexcel.free_resources()
|
|
|
|
|
return instance
|
2016-07-05 11:39:07 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_help_argument():
|
|
|
|
|
return request.args.get('help') if request.args.get('help') in ('1', '2', '3') else None
|
2016-10-25 18:10:15 +01:00
|
|
|
|
|
|
|
|
|
2016-10-28 10:45:05 +01:00
|
|
|
def is_gov_user(email_address):
|
|
|
|
|
valid_domains = current_app.config['EMAIL_DOMAIN_REGEXES']
|
2016-10-28 11:44:35 +01:00
|
|
|
email_regex = (r"[\.|@]({})$".format("|".join(valid_domains)))
|
2016-10-28 10:45:05 +01:00
|
|
|
return bool(re.search(email_regex, email_address.lower()))
|
2016-12-05 11:51:19 +00:00
|
|
|
|
|
|
|
|
|
2016-12-20 14:38:34 +00:00
|
|
|
def get_template(
|
|
|
|
|
template,
|
|
|
|
|
service,
|
|
|
|
|
show_recipient=False,
|
|
|
|
|
expand_emails=False,
|
|
|
|
|
letter_preview_url=None,
|
2017-04-20 10:40:15 +01:00
|
|
|
page_count=1,
|
2017-06-24 17:18:49 +01:00
|
|
|
redact_missing_personalisation=False,
|
2017-10-17 16:06:15 +01:00
|
|
|
email_reply_to=None,
|
2017-11-16 14:13:32 +00:00
|
|
|
sms_sender=None,
|
2016-12-20 14:38:34 +00:00
|
|
|
):
|
2016-12-08 11:50:59 +00:00
|
|
|
if 'email' == template['template_type']:
|
|
|
|
|
return EmailPreviewTemplate(
|
|
|
|
|
template,
|
2016-12-05 11:51:19 +00:00
|
|
|
from_name=service['name'],
|
|
|
|
|
from_address='{}@notifications.service.gov.uk'.format(service['email_from']),
|
|
|
|
|
expanded=expand_emails,
|
2017-06-24 17:18:49 +01:00
|
|
|
show_recipient=show_recipient,
|
|
|
|
|
redact_missing_personalisation=redact_missing_personalisation,
|
2017-10-17 16:06:15 +01:00
|
|
|
reply_to=email_reply_to,
|
2016-12-08 11:50:59 +00:00
|
|
|
)
|
|
|
|
|
if 'sms' == template['template_type']:
|
|
|
|
|
return SMSPreviewTemplate(
|
|
|
|
|
template,
|
2016-12-05 11:51:19 +00:00
|
|
|
prefix=service['name'],
|
2017-11-16 13:35:17 +00:00
|
|
|
show_prefix=service['prefix_sms'],
|
|
|
|
|
sender=sms_sender,
|
2017-11-16 14:13:32 +00:00
|
|
|
show_sender=bool(sms_sender),
|
2017-06-24 17:18:49 +01:00
|
|
|
show_recipient=show_recipient,
|
|
|
|
|
redact_missing_personalisation=redact_missing_personalisation,
|
2016-12-08 11:50:59 +00:00
|
|
|
)
|
|
|
|
|
if 'letter' == template['template_type']:
|
2016-12-20 14:38:34 +00:00
|
|
|
if letter_preview_url:
|
2017-04-28 16:04:52 +01:00
|
|
|
return LetterImageTemplate(
|
2016-12-20 14:38:34 +00:00
|
|
|
template,
|
2017-04-28 16:04:52 +01:00
|
|
|
image_url=letter_preview_url,
|
2017-04-20 10:40:15 +01:00
|
|
|
page_count=int(page_count),
|
2016-12-20 14:38:34 +00:00
|
|
|
)
|
|
|
|
|
else:
|
|
|
|
|
return LetterPreviewTemplate(
|
2017-03-03 16:53:24 +00:00
|
|
|
template,
|
|
|
|
|
contact_block=service['letter_contact_block'],
|
2017-06-24 17:18:49 +01:00
|
|
|
admin_base_url=current_app.config['ADMIN_BASE_URL'],
|
|
|
|
|
redact_missing_personalisation=redact_missing_personalisation,
|
2016-12-20 14:38:34 +00:00
|
|
|
)
|
2016-12-28 11:06:11 +00:00
|
|
|
|
|
|
|
|
|
2017-01-25 15:59:06 +00:00
|
|
|
def get_current_financial_year():
|
|
|
|
|
now = datetime.utcnow()
|
|
|
|
|
current_month = int(now.strftime('%-m'))
|
|
|
|
|
current_year = int(now.strftime('%Y'))
|
|
|
|
|
return current_year if current_month > 3 else current_year - 1
|
2017-06-12 17:21:25 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_time_left(created_at):
|
|
|
|
|
return ago.human(
|
|
|
|
|
(
|
|
|
|
|
datetime.now(timezone.utc).replace(hour=23, minute=59, second=59)
|
|
|
|
|
) - (
|
|
|
|
|
dateutil.parser.parse(created_at) + timedelta(days=8)
|
|
|
|
|
),
|
|
|
|
|
future_tense='Data available for {}',
|
|
|
|
|
past_tense='Data no longer available', # No-one should ever see this
|
|
|
|
|
precision=1
|
|
|
|
|
)
|
2017-07-04 17:25:35 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def email_or_sms_not_enabled(template_type, permissions):
|
|
|
|
|
return (template_type in ['email', 'sms']) and (template_type not in permissions)
|
2017-07-11 17:06:15 +01:00
|
|
|
|
|
|
|
|
|
2017-07-14 11:13:37 +01:00
|
|
|
def get_letter_timings(upload_time):
|
|
|
|
|
|
|
|
|
|
LetterTimings = namedtuple(
|
|
|
|
|
'LetterTimings',
|
|
|
|
|
'printed_by, is_printed, earliest_delivery, latest_delivery'
|
|
|
|
|
)
|
2017-07-11 17:06:15 +01:00
|
|
|
|
|
|
|
|
# shift anything after 5pm to the next day
|
|
|
|
|
processing_day = gmt_timezones(upload_time) + timedelta(hours=(7))
|
|
|
|
|
|
2017-07-14 11:13:37 +01:00
|
|
|
print_day, earliest_delivery, latest_delivery = (
|
2017-07-11 17:06:15 +01:00
|
|
|
processing_day + timedelta(days=days)
|
|
|
|
|
for days in {
|
2017-07-14 11:13:37 +01:00
|
|
|
'Wednesday': (1, 3, 5),
|
|
|
|
|
'Thursday': (1, 4, 5),
|
|
|
|
|
'Friday': (3, 5, 6),
|
|
|
|
|
'Saturday': (2, 4, 5),
|
|
|
|
|
}.get(processing_day.strftime('%A'), (1, 3, 4))
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
printed_by = print_day.astimezone(pytz.timezone('Europe/London')).replace(hour=15, minute=0)
|
|
|
|
|
now = datetime.utcnow().replace(tzinfo=pytz.timezone('Europe/London'))
|
|
|
|
|
|
|
|
|
|
return LetterTimings(
|
|
|
|
|
printed_by=printed_by,
|
|
|
|
|
is_printed=(now > printed_by),
|
|
|
|
|
earliest_delivery=earliest_delivery,
|
|
|
|
|
latest_delivery=latest_delivery,
|
2017-07-11 17:06:15 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def gmt_timezones(date):
|
|
|
|
|
date = dateutil.parser.parse(date)
|
|
|
|
|
forced_utc = date.replace(tzinfo=pytz.utc)
|
|
|
|
|
return forced_utc.astimezone(pytz.timezone('Europe/London'))
|
2017-07-24 15:20:40 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_cdn_domain():
|
|
|
|
|
parsed_uri = urlparse(current_app.config['ADMIN_BASE_URL'])
|
|
|
|
|
|
|
|
|
|
if parsed_uri.netloc.startswith('localhost'):
|
|
|
|
|
return 'static-logos.notify.tools'
|
|
|
|
|
|
|
|
|
|
subdomain = parsed_uri.hostname.split('.')[0]
|
|
|
|
|
domain = parsed_uri.netloc[len(subdomain + '.'):]
|
|
|
|
|
|
|
|
|
|
return "static-logos.{}".format(domain)
|