2016-08-12 11:23:50 +01:00
.DEFAULT_GOAL := help
SHELL := /bin/bash
DATE = $( shell date +%Y-%m-%d:%H:%M:%S)
PIP_ACCEL_CACHE ?= ${ CURDIR } /cache/pip-accel
APP_VERSION_FILE = app/version.py
2016-08-26 16:18:13 +01:00
GIT_BRANCH ?= $( shell git symbolic-ref --short HEAD 2> /dev/null || echo "detached" )
2017-02-01 15:44:10 +00:00
GIT_COMMIT ?= $( shell git rev-parse HEAD 2> /dev/null || echo "" )
2016-08-12 11:23:50 +01:00
2016-12-08 16:50:37 +00:00
DOCKER_IMAGE_TAG := $( shell cat docker/VERSION)
DOCKER_BUILDER_IMAGE_NAME = govuk/notify-admin-builder:${ DOCKER_IMAGE_TAG }
2017-01-11 14:02:10 +00:00
DOCKER_TTY ?= $( if ${ JENKINS_HOME } ,,t)
2016-08-12 11:23:50 +01:00
BUILD_TAG ?= notifications-admin-manual
BUILD_NUMBER ?= 0
DEPLOY_BUILD_NUMBER ?= ${ BUILD_NUMBER }
BUILD_URL ?=
DOCKER_CONTAINER_PREFIX = ${ USER } -${ BUILD_TAG }
2016-11-22 12:24:26 +00:00
CODEDEPLOY_PREFIX ?= notifications-admin
CODEDEPLOY_APP_NAME ?= notify-admin
2016-12-08 16:50:37 +00:00
CF_API ?= api.cloud.service.gov.uk
CF_ORG ?= govuk-notify
CF_SPACE ?= ${ DEPLOY_ENV }
2017-02-28 12:31:56 +00:00
CF_HOME ?= ${ HOME }
$( eval export CF_HOME )
2016-12-08 16:50:37 +00:00
.PHONY: help
help:
	@cat $(MAKEFILE_LIST) | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

.PHONY: venv
venv: venv/bin/activate ## Create virtualenv if it does not exist

# Real file target: only (re)creates the virtualenv when the activate script is missing.
venv/bin/activate:
	test -d venv || virtualenv venv -p python3
	. venv/bin/activate && pip install pip-accel
.PHONY: check-env-vars
check-env-vars: ## Check mandatory environment variables
	$(if ${DEPLOY_ENV},,$(error Must specify DEPLOY_ENV))
	$(if ${DNS_NAME},,$(error Must specify DNS_NAME))
	$(if ${AWS_ACCESS_KEY_ID},,$(error Must specify AWS_ACCESS_KEY_ID))
	$(if ${AWS_SECRET_ACCESS_KEY},,$(error Must specify AWS_SECRET_ACCESS_KEY))
# Environment selector targets: each exports DEPLOY_ENV/DNS_NAME for later goals
# on the same command line (e.g. `make preview cf-deploy`). The trailing @true
# silences make's "Nothing to be done" message.
.PHONY: sandbox
sandbox: ## Set environment to sandbox
	$(eval export DEPLOY_ENV=sandbox)
	$(eval export DNS_NAME="cloudapps.digital")
	@true

.PHONY: preview
preview: ## Set environment to preview
	$(eval export DEPLOY_ENV=preview)
	$(eval export DNS_NAME="notify.works")
	@true

.PHONY: staging
staging: ## Set environment to staging
	$(eval export DEPLOY_ENV=staging)
	$(eval export DNS_NAME="staging-notify.works")
	@true

.PHONY: production
production: ## Set environment to production
	$(eval export DEPLOY_ENV=production)
	$(eval export DNS_NAME="notifications.service.gov.uk")
	@true
.PHONY: dependencies
dependencies: venv ## Install build dependencies
	npm set progress=false
	npm install
	npm rebuild node-sass
	mkdir -p ${PIP_ACCEL_CACHE}
	. venv/bin/activate && PIP_ACCEL_CACHE=${PIP_ACCEL_CACHE} pip-accel install -r requirements_for_test.txt
.PHONY: generate-version-file
generate-version-file: ## Generates the app version file
	@echo -e "__travis_commit__ = \"${GIT_COMMIT}\"\n__time__ = \"${DATE}\"\n__travis_job_number__ = \"${BUILD_NUMBER}\"\n__travis_job_url__ = \"${BUILD_URL}\"" > ${APP_VERSION_FILE}

.PHONY: build
build: dependencies generate-version-file ## Build project
	npm run build
	. venv/bin/activate && PIP_ACCEL_CACHE=${PIP_ACCEL_CACHE} pip-accel wheel --wheel-dir=wheelhouse -r requirements.txt
# PaaS build: no wheelhouse needed, Cloud Foundry installs requirements itself.
.PHONY: cf-build
cf-build: dependencies generate-version-file ## Build project
	npm run build
.PHONY: build-codedeploy-artifact
build-codedeploy-artifact: ## Build the deploy artifact for CodeDeploy
	rm -rf target
	mkdir -p target
	# -x@: exclude the patterns listed in deploy-exclude.lst from the zip
	zip -y -q -r -x@deploy-exclude.lst target/notifications-admin.zip ./

.PHONY: upload-codedeploy-artifact ## Upload the deploy artifact for CodeDeploy
upload-codedeploy-artifact: check-env-vars
	$(if ${DEPLOY_BUILD_NUMBER},,$(error Must specify DEPLOY_BUILD_NUMBER))
	aws s3 cp --region eu-west-1 --sse AES256 target/notifications-admin.zip s3://${DNS_NAME}-codedeploy/${CODEDEPLOY_PREFIX}-${DEPLOY_BUILD_NUMBER}.zip
# The PaaS artifact is identical to the CodeDeploy one; only the S3 destination differs.
.PHONY: build-paas-artifact
build-paas-artifact: build-codedeploy-artifact ## Build the deploy artifact for PaaS

.PHONY: upload-paas-artifact ## Upload the deploy artifact for PaaS
upload-paas-artifact:
	$(if ${DEPLOY_BUILD_NUMBER},,$(error Must specify DEPLOY_BUILD_NUMBER))
	$(if ${JENKINS_S3_BUCKET},,$(error Must specify JENKINS_S3_BUCKET))
	aws s3 cp --region eu-west-1 --sse AES256 target/notifications-admin.zip s3://${JENKINS_S3_BUCKET}/build/${CODEDEPLOY_PREFIX}/${DEPLOY_BUILD_NUMBER}.zip
.PHONY: test
test: venv ## Run tests
	./scripts/run_tests.sh

.PHONY: deploy
deploy: check-env-vars ## Upload deploy artifacts to S3 and trigger CodeDeploy
	aws deploy create-deployment --application-name ${CODEDEPLOY_APP_NAME} --deployment-config-name CodeDeployDefault.OneAtATime --deployment-group-name ${CODEDEPLOY_APP_NAME} --s3-location bucket=${DNS_NAME}-codedeploy,key=${CODEDEPLOY_PREFIX}-${DEPLOY_BUILD_NUMBER}.zip,bundleType=zip --region eu-west-1
.PHONY: check-aws-vars
check-aws-vars: ## Check if AWS access keys are set
	$(if ${AWS_ACCESS_KEY_ID},,$(error Must specify AWS_ACCESS_KEY_ID))
	$(if ${AWS_SECRET_ACCESS_KEY},,$(error Must specify AWS_SECRET_ACCESS_KEY))

.PHONY: deploy-suspend-autoscaling-processes
deploy-suspend-autoscaling-processes: check-aws-vars ## Suspend launch and terminate processes for the auto-scaling group
	aws autoscaling suspend-processes --region eu-west-1 --auto-scaling-group-name ${CODEDEPLOY_APP_NAME} --scaling-processes "Launch" "Terminate"

.PHONY: deploy-resume-autoscaling-processes
deploy-resume-autoscaling-processes: check-aws-vars ## Resume launch and terminate processes for the auto-scaling group
	aws autoscaling resume-processes --region eu-west-1 --auto-scaling-group-name ${CODEDEPLOY_APP_NAME} --scaling-processes "Launch" "Terminate"

.PHONY: deploy-check-autoscaling-processes
deploy-check-autoscaling-processes: check-aws-vars ## Returns with the number of instances with active autoscaling events
	@aws autoscaling describe-auto-scaling-groups --region eu-west-1 --auto-scaling-group-names ${CODEDEPLOY_APP_NAME} | jq '.AutoScalingGroups[0].Instances|map(select(.LifecycleState != "InService"))|length'
.PHONY: coverage
coverage: venv ## Create coverage report
	. venv/bin/activate && coveralls

.PHONY: prepare-docker-build-image
prepare-docker-build-image: ## Prepare the Docker builder image
	mkdir -p ${PIP_ACCEL_CACHE}
	make -C docker build
# run_docker_container: run a command inside the builder image.
#   $(1) - container name suffix (appended to ${DOCKER_CONTAINER_PREFIX})
#   $(2) - command to run inside the container
# The project directory and the pip-accel cache are bind-mounted; CI and
# Cloud Foundry settings are passed through as environment variables.
define run_docker_container
	@docker run -i${DOCKER_TTY} --rm \
		--name "${DOCKER_CONTAINER_PREFIX}-${1}" \
		-v "`pwd`:/var/project" \
		-v "${PIP_ACCEL_CACHE}:/var/project/cache/pip-accel" \
		-e UID=$(shell id -u) \
		-e GID=$(shell id -g) \
		-e GIT_COMMIT=${GIT_COMMIT} \
		-e BUILD_NUMBER=${BUILD_NUMBER} \
		-e BUILD_URL=${BUILD_URL} \
		-e http_proxy="${HTTP_PROXY}" \
		-e HTTP_PROXY="${HTTP_PROXY}" \
		-e https_proxy="${HTTPS_PROXY}" \
		-e HTTPS_PROXY="${HTTPS_PROXY}" \
		-e NO_PROXY="${NO_PROXY}" \
		-e COVERALLS_REPO_TOKEN=${COVERALLS_REPO_TOKEN} \
		-e CIRCLECI=1 \
		-e CI_NAME=${CI_NAME} \
		-e CI_BUILD_NUMBER=${BUILD_NUMBER} \
		-e CI_BUILD_URL=${BUILD_URL} \
		-e CI_BRANCH=${GIT_BRANCH} \
		-e CI_PULL_REQUEST=${CI_PULL_REQUEST} \
		-e CF_API="${CF_API}" \
		-e CF_USERNAME="${CF_USERNAME}" \
		-e CF_PASSWORD="${CF_PASSWORD}" \
		-e CF_ORG="${CF_ORG}" \
		-e CF_SPACE="${CF_SPACE}" \
		${DOCKER_BUILDER_IMAGE_NAME} \
		${2}
endef
# gosu hostuser: run as the host UID/GID (passed in by run_docker_container)
# so files written into the bind-mounted project dir are owned by the caller.
.PHONY: build-with-docker
build-with-docker: prepare-docker-build-image ## Build inside a Docker container
	$(call run_docker_container,build,gosu hostuser make build)

.PHONY: cf-build-with-docker
cf-build-with-docker: prepare-docker-build-image ## Build inside a Docker container
	$(call run_docker_container,build,gosu hostuser make cf-build)

.PHONY: test-with-docker
test-with-docker: prepare-docker-build-image ## Run tests inside a Docker container
	$(call run_docker_container,test,gosu hostuser make test)

# FIXME: CIRCLECI=1 is an ugly hack because the coveralls-python library sends the PR link only this way
.PHONY: coverage-with-docker
coverage-with-docker: prepare-docker-build-image ## Generates coverage report inside a Docker container
	$(call run_docker_container,coverage,gosu hostuser make coverage)

.PHONY: clean-docker-containers
clean-docker-containers: ## Clean up any remaining docker containers
	docker rm -f $(shell docker ps -q -f "name=${DOCKER_CONTAINER_PREFIX}") 2> /dev/null || true
.PHONY: clean
clean:
	rm -rf node_modules cache target venv .coverage wheelhouse

.PHONY: cf-login
cf-login: ## Log in to Cloud Foundry
	$(if ${CF_USERNAME},,$(error Must specify CF_USERNAME))
	$(if ${CF_PASSWORD},,$(error Must specify CF_PASSWORD))
	$(if ${CF_SPACE},,$(error Must specify CF_SPACE))
	@echo "Logging in to Cloud Foundry on ${CF_API}"
	@cf login -a "${CF_API}" -u ${CF_USERNAME} -p "${CF_PASSWORD}" -o "${CF_ORG}" -s "${CF_SPACE}"

# Blue/green-ish deploy: keep the old app around as notify-admin-rollback
# until the new push succeeds, then retire it.
.PHONY: cf-deploy
cf-deploy: ## Deploys the app to Cloud Foundry
	$(if ${CF_SPACE},,$(error Must specify CF_SPACE))
	# fail early (@cf … || exit 1) if the app does not exist yet
	@cf app --guid notify-admin || exit 1
	cf rename notify-admin notify-admin-rollback
	cf push -f manifest-${CF_SPACE}.yml
	# scale the new app to the same instance count as the old one (default 1)
	cf scale -i $$(cf curl /v2/apps/$$(cf app --guid notify-admin-rollback) | jq -r ".entity.instances" 2>/dev/null || echo "1") notify-admin
	cf stop notify-admin-rollback
	cf delete -f notify-admin-rollback
# Add config to deploy a prototype version of admin
#
# Sometimes we want to make changes to the admin app for doing user
# research that we don't want all users to see (because we're not sure if
# they're the right changes to be making).
#
# Previously this meant doing the research using a team member's computer,
# with the app running locally. This was bad for three reasons:
# - requires the time of someone who has the code running locally
# - requires the participant to use an unfamiliar computer
# - means the participant doesn't have access to their own Notify account
#   (or an account that we've set up for doing user research with)
#
# The dream* would be to have two versions of the frontend app running
# side by side in production. This commit makes the dream real - the two
# versions of admin are:
# - the normal admin app, accessible on
#   `www.notifications.service.gov.uk`
# - a prototype version meant to be pushed to from a developer's local
#   machine**, on a `cloudapps.digital` subdomain
#
# Both of these apps share the same backing services, eg config, API
# instance, queues, etc, etc. Which means that the prototype version can
# be logged into with the same username and password, and the user will
# see their service and all their templates when they do so.
#
# Ideally this wouldn't mean creating a separate base manifest. However
# it's a feature of Cloud Foundry that you can override the application
# name. Which means a separate base manifest and a bit of duplication.
#
# * actually the real dream would be to have a version of admin deployed
#   for each branch of the admin app, but this might get a bit resource
#   intensive.
#
# ** by running `CF_SPACE=preview make preview cf-deploy-prototype`, where
#    `preview` is the name of the space you want to deploy to
.PHONY: cf-deploy-prototype
cf-deploy-prototype: ## Deploys the prototype app to Cloud Foundry
	$(if ${CF_SPACE},,$(error Must specify CF_SPACE))
	cf target -s ${CF_SPACE}
	cf push -f manifest-prototype-${CF_SPACE}.yml

.PHONY: cf-rollback
cf-rollback: ## Rollbacks the app to the previous release
	@cf app --guid notify-admin-rollback || exit 1
	# refuse to roll back unless the rollback app is actually running
	@[ $$(cf curl /v2/apps/`cf app --guid notify-admin-rollback` | jq -r ".entity.state") = "STARTED" ] || (echo "Error: rollback is not possible because notify-admin-rollback is not in a started state" && exit 1)
	cf delete -f notify-admin || true
	cf rename notify-admin-rollback notify-admin

.PHONY: cf-push
cf-push:
	cf push -f manifest-${CF_SPACE}.yml