# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# Test jobs may be disabled by setting environment variables:
# * test: TEST_DISABLED
# * code_quality: CODE_QUALITY_DISABLED
# * license_management: LICENSE_MANAGEMENT_DISABLED
# * performance: PERFORMANCE_DISABLED
# * sast: SAST_DISABLED
# * dependency_scanning: DEPENDENCY_SCANNING_DISABLED
# * container_scanning: CONTAINER_SCANNING_DISABLED
# * dast: DAST_DISABLED
# * review: REVIEW_DISABLED
# * stop_review: REVIEW_DISABLED
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# KUBE_INGRESS_BASE_DOMAIN must also be set in the cluster settings,
# as a variable at the group or project level, or manually added below.
#
# Continuous deployment to production is enabled by default.
# If you want to deploy to staging first, set the STAGING_ENABLED environment variable.
# If you want to enable incremental rollout, either manual or time-based,
# set the INCREMENTAL_ROLLOUT_MODE environment variable to "manual" or "timed".
# If you want to use canary deployments, set the CANARY_ENABLED environment variable.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet.

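# For illustration only (not part of the standard template): these switches are
# usually provided as group- or project-level CI/CD variables, but a project that
# includes this template could also declare them directly, for example:
#
#   variables:
#     TEST_DISABLED: "true"                # skip the buildpack test job
#     CODE_QUALITY_DISABLED: "true"        # skip code quality analysis
#     STAGING_ENABLED: "1"                 # deploy to staging before production
#     INCREMENTAL_ROLLOUT_MODE: "manual"   # promote to production in manual steps
#     BUILDPACK_URL: "https://github.com/heroku/heroku-buildpack-ruby.git#v142"
#
# A multi-buildpack project would instead commit a `.buildpacks` file listing one
# buildpack URL per line (illustrative URLs):
#
#   https://github.com/heroku/heroku-buildpack-nodejs.git
#   https://github.com/heroku/heroku-buildpack-ruby.git
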
image: alpine:latest

variables:
  # KUBE_INGRESS_BASE_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
  # KUBE_INGRESS_BASE_DOMAIN: domain.example.com

  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  POSTGRES_ENABLED: "true"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG

  KUBERNETES_VERSION: 1.11.6
  HELM_VERSION: 2.12.2

  DOCKER_DRIVER: overlay2

stages:
  - build
  - test
  - review
  - dast
  - staging
  - canary
  - production
  - incremental rollout 10%
  - incremental rollout 25%
  - incremental rollout 50%
  - incremental rollout 100%
  - performance
  - cleanup

build:
  stage: build
  image: docker:stable-git
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - build
  only:
    - branches

test:
  services:
    - postgres:latest
  variables:
    POSTGRES_DB: test
  stage: test
  image: gliderlabs/herokuish:latest
  script:
    - setup_test_db
    - cp -R . /tmp/app
    - /bin/herokuish buildpack test
  only:
    - branches
  except:
    variables:
      - $TEST_DISABLED

code_quality:
  stage: test
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - code_quality
  artifacts:
    paths: [gl-code-quality-report.json]
  only:
    - branches
  except:
    variables:
      - $CODE_QUALITY_DISABLED

license_management:
  stage: test
  image:
    name: "registry.gitlab.com/gitlab-org/security-products/license-management:$CI_SERVER_VERSION_MAJOR-$CI_SERVER_VERSION_MINOR-stable"
    entrypoint: [""]
  allow_failure: true
  script:
    - license_management
  artifacts:
    paths: [gl-license-management-report.json]
  only:
    refs:
      - branches
    variables:
      - $GITLAB_FEATURES =~ /\blicense_management\b/
  except:
    variables:
      - $LICENSE_MANAGEMENT_DISABLED

performance:
  stage: performance
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - performance
  artifacts:
    paths:
      - performance.json
      - sitespeed-results/
  only:
    refs:
      - branches
    kubernetes: active
  except:
    variables:
      - $PERFORMANCE_DISABLED

sast:
  stage: test
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - sast
  artifacts:
    reports:
      sast: gl-sast-report.json
  only:
    refs:
      - branches
    variables:
      - $GITLAB_FEATURES =~ /\bsast\b/
  except:
    variables:
      - $SAST_DISABLED

dependency_scanning:
  stage: test
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - dependency_scanning
  artifacts:
    reports:
      dependency_scanning: gl-dependency-scanning-report.json
  only:
    refs:
      - branches
    variables:
      - $GITLAB_FEATURES =~ /\bdependency_scanning\b/
  except:
    variables:
      - $DEPENDENCY_SCANNING_DISABLED

container_scanning:
  stage: test
  image: docker:stable
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - setup_docker
    - container_scanning
  artifacts:
    paths: [gl-container-scanning-report.json]
  only:
    refs:
      - branches
    variables:
      - $GITLAB_FEATURES =~ /\bcontainer_scanning\b/
  except:
    variables:
      - $CONTAINER_SCANNING_DISABLED

dast:
  stage: dast
  allow_failure: true
  image: registry.gitlab.com/gitlab-org/security-products/zaproxy
  variables:
    POSTGRES_DB: "false"
  script:
    - dast
  artifacts:
    paths: [gl-dast-report.json]
  only:
    refs:
      - branches
    kubernetes: active
    variables:
      - $GITLAB_FEATURES =~ /\bdast\b/
  except:
    refs:
      - master
    variables:
      - $DAST_DISABLED

review:
  stage: review
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - initialize_tiller
    - create_secret
    - deploy
    - persist_environment_url
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: http://$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_SLUG.$KUBE_INGRESS_BASE_DOMAIN
    on_stop: stop_review
  artifacts:
    paths: [environment_url.txt]
  only:
    refs:
      - branches
    kubernetes: active
  except:
    refs:
      - master
    variables:
      - $REVIEW_DISABLED

stop_review:
  stage: cleanup
  variables:
    GIT_STRATEGY: none
  script:
    - install_dependencies
    - initialize_tiller
    - delete
  environment:
    name: review/$CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
    kubernetes: active
  except:
    refs:
      - master
    variables:
      - $REVIEW_DISABLED

# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by setting
# STAGING_ENABLED.

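# For reference (behaviour of the jobs in this template): setting STAGING_ENABLED to
# any non-empty value, e.g. STAGING_ENABLED: "1" as a project CI/CD variable,
# activates this job; the automatic `production` job is then skipped and the manual
# `production_manual` job becomes the gate to production.
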
staging:
  stage: staging
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - initialize_tiller
    - create_secret
    - deploy
  environment:
    name: staging
    url: http://$CI_PROJECT_PATH_SLUG-staging.$KUBE_INGRESS_BASE_DOMAIN
  only:
    refs:
      - master
    kubernetes: active
    variables:
      - $STAGING_ENABLED

# Canaries are also disabled by default, but if you want them,
# and know what the downsides are, you can enable this by setting
# CANARY_ENABLED.

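# For reference (behaviour inferred from the jobs below): with CANARY_ENABLED set,
# this manual job deploys the new image on a separate `canary` track alongside the
# current stable release; the production jobs later run `delete canary` once the
# full rollout happens.
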
canary:
  stage: canary
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - initialize_tiller
    - create_secret
    - deploy canary
  environment:
    name: production
    url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
  when: manual
  only:
    refs:
      - master
    kubernetes: active
    variables:
      - $CANARY_ENABLED

.production: &production_template
  stage: production
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - initialize_tiller
    - create_secret
    - deploy
    - delete canary
    - delete rollout
    - persist_environment_url
  environment:
    name: production
    url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
  artifacts:
    paths: [environment_url.txt]

production:
  <<: *production_template
  only:
    refs:
      - master
    kubernetes: active
  except:
    variables:
      - $STAGING_ENABLED
      - $CANARY_ENABLED
      - $INCREMENTAL_ROLLOUT_ENABLED
      - $INCREMENTAL_ROLLOUT_MODE

production_manual:
  <<: *production_template
  when: manual
  allow_failure: false
  only:
    refs:
      - master
    kubernetes: active
    variables:
      - $STAGING_ENABLED
      - $CANARY_ENABLED
  except:
    variables:
      - $INCREMENTAL_ROLLOUT_ENABLED
      - $INCREMENTAL_ROLLOUT_MODE

# This job implements incremental rollout for every push to `master`.

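# For reference (behaviour inferred from the templates below): with
# INCREMENTAL_ROLLOUT_MODE set to "timed", each push to `master` walks through the
# `incremental rollout 10% / 25% / 50% / 100%` stages automatically, with a
# 5-minute delay before each step; with "manual", the same steps become manual jobs.
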
.rollout: &rollout_template
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - initialize_tiller
    - create_secret
    - deploy rollout $ROLLOUT_PERCENTAGE
    - scale stable $((100-ROLLOUT_PERCENTAGE))
    - delete canary
    - persist_environment_url
  environment:
    name: production
    url: http://$CI_PROJECT_PATH_SLUG.$KUBE_INGRESS_BASE_DOMAIN
  artifacts:
    paths: [environment_url.txt]

.manual_rollout_template: &manual_rollout_template
  <<: *rollout_template
  stage: production
  when: manual
  # These selectors keep backward compatibility with $INCREMENTAL_ROLLOUT_ENABLED (before 11.4)
  only:
    refs:
      - master
    kubernetes: active
    variables:
      - $INCREMENTAL_ROLLOUT_MODE == "manual"
      - $INCREMENTAL_ROLLOUT_ENABLED
  except:
    variables:
      - $INCREMENTAL_ROLLOUT_MODE == "timed"

.timed_rollout_template: &timed_rollout_template
  <<: *rollout_template
  when: delayed
  start_in: 5 minutes
  only:
    refs:
      - master
    kubernetes: active
    variables:
      - $INCREMENTAL_ROLLOUT_MODE == "timed"

timed rollout 10%:
  <<: *timed_rollout_template
  stage: incremental rollout 10%
  variables:
    ROLLOUT_PERCENTAGE: 10

timed rollout 25%:
  <<: *timed_rollout_template
  stage: incremental rollout 25%
  variables:
    ROLLOUT_PERCENTAGE: 25

timed rollout 50%:
  <<: *timed_rollout_template
  stage: incremental rollout 50%
  variables:
    ROLLOUT_PERCENTAGE: 50

timed rollout 100%:
  <<: *timed_rollout_template
  <<: *production_template
  stage: incremental rollout 100%
  variables:
    ROLLOUT_PERCENTAGE: 100

rollout 10%:
  <<: *manual_rollout_template
  variables:
    ROLLOUT_PERCENTAGE: 10

rollout 25%:
  <<: *manual_rollout_template
  variables:
    ROLLOUT_PERCENTAGE: 25

rollout 50%:
  <<: *manual_rollout_template
  variables:
    ROLLOUT_PERCENTAGE: 50

rollout 100%:
  <<: *manual_rollout_template
  <<: *production_template
  allow_failure: false

# ---------------------------------------------------------------------------

.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  export TILLER_NAMESPACE=$KUBE_NAMESPACE
  # Extract "MAJOR.MINOR" from CI_SERVER_VERSION and generate "MAJOR-MINOR-stable" for Security Products
  export SP_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
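  # For example (illustrative value): CI_SERVER_VERSION="11.8.3" yields SP_VERSION="11-8-stable"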

  function registry_login() {
    if [[ -n "$CI_REGISTRY_USER" ]]; then
      echo "Logging in to GitLab Container Registry with CI credentials..."
      docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
      echo ""
    fi
  }

  function container_scanning() {
    registry_login

    docker run -d --name db arminc/clair-db:latest
    docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:v2.0.1
    apk add -U wget ca-certificates
    docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
    wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
    mv clair-scanner_linux_amd64 clair-scanner
    chmod +x clair-scanner
    touch clair-whitelist.yml
    retries=0
    echo "Waiting for clair daemon to start"
    while( ! wget -T 10 -q -O /dev/null http://docker:6060/v1/namespaces ) ; do sleep 1 ; echo -n "." ; if [ $retries -eq 10 ] ; then echo " Timeout, aborting." ; exit 1 ; fi ; retries=$(($retries+1)) ; done
    ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-container-scanning-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
  }

  function code_quality() {
    docker run --env SOURCE_CODE="$PWD" \
               --volume "$PWD":/code \
               --volume /var/run/docker.sock:/var/run/docker.sock \
               "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
  }

  function license_management() {
    /run.sh analyze .
  }

  function sast() {
    case "$CI_SERVER_VERSION" in
      *-ee)

        # Deprecation notice for CONFIDENCE_LEVEL variable
        if [ -z "$SAST_CONFIDENCE_LEVEL" -a "$CONFIDENCE_LEVEL" ]; then
          SAST_CONFIDENCE_LEVEL="$CONFIDENCE_LEVEL"
          echo "WARNING: CONFIDENCE_LEVEL is deprecated and MUST be replaced with SAST_CONFIDENCE_LEVEL"
        fi

        docker run --env SAST_CONFIDENCE_LEVEL="${SAST_CONFIDENCE_LEVEL:-3}" \
                   --volume "$PWD:/code" \
                   --volume /var/run/docker.sock:/var/run/docker.sock \
                   "registry.gitlab.com/gitlab-org/security-products/sast:$SP_VERSION" /app/bin/run /code
        ;;
      *)
        echo "GitLab EE is required"
        ;;
    esac
  }

  function dependency_scanning() {
    case "$CI_SERVER_VERSION" in
      *-ee)
        docker run --env DEP_SCAN_DISABLE_REMOTE_CHECKS="${DEP_SCAN_DISABLE_REMOTE_CHECKS:-false}" \
                   --volume "$PWD:/code" \
                   --volume /var/run/docker.sock:/var/run/docker.sock \
                   "registry.gitlab.com/gitlab-org/security-products/dependency-scanning:$SP_VERSION" /code
        ;;
      *)
        echo "GitLab EE is required"
        ;;
    esac
  }

  function get_replicas() {
    track="${1:-stable}"
    percentage="${2:-100}"

    env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )

    if [[ "$track" == "stable" ]] || [[ "$track" == "rollout" ]]; then
      # for the stable track, get the number of replicas from e.g. `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      if [[ -z "$new_replicas" ]]; then
        new_replicas=$REPLICAS
      fi
    else
      # for other tracks, get the number of replicas from e.g. `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
      if [[ -z "$new_replicas" ]]; then
        eval new_replicas=\$${env_track}_REPLICAS
      fi
    fi

    replicas="${new_replicas:-1}"
    replicas="$(($replicas * $percentage / 100))"

    # always return at least one replica
    if [[ $replicas -gt 0 ]]; then
      echo "$replicas"
    else
      echo 1
    fi
  }
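
  # Worked example (illustrative values, assuming a `production` environment):
  # with PRODUCTION_REPLICAS=4, `get_replicas rollout 25` returns 4 * 25 / 100 = 1,
  # while the matching `scale stable 75` call resizes the stable track to
  # 4 * 75 / 100 = 3 replicas, keeping the total at 4 during the rollout.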

  # Extracts variables prefixed with K8S_SECRET_
  # and creates a Kubernetes secret.
  #
  # e.g. If we have the following environment variables:
  #   K8S_SECRET_A=value1
  #   K8S_SECRET_B=multi\ word\ value
  #
  # Then we will create a secret with the following key-value pairs:
  #   data:
  #     A: dmFsdWUxCg==
  #     B: bXVsdGkgd29yZCB2YWx1ZQo=
  function create_application_secret() {
    track="${1-stable}"
    export APPLICATION_SECRET_NAME=$(application_secret_name "$track")

    env | sed -n "s/^K8S_SECRET_\(.*\)$/\1/p" > k8s_prefixed_variables

    kubectl create secret \
      -n "$KUBE_NAMESPACE" generic "$APPLICATION_SECRET_NAME" \
      --from-env-file k8s_prefixed_variables -o yaml --dry-run |
      kubectl replace -n "$KUBE_NAMESPACE" --force -f -

    export APPLICATION_SECRET_CHECKSUM=$(cat k8s_prefixed_variables | sha256sum | cut -d ' ' -f 1)

    rm k8s_prefixed_variables
  }

  function deploy_name() {
    name="$CI_ENVIRONMENT_SLUG"
    track="${1-stable}"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    echo $name
  }

  function application_secret_name() {
    track="${1-stable}"
    name=$(deploy_name "$track")

    echo "${name}-secret"
  }

  function deploy() {
    track="${1-stable}"
    percentage="${2:-100}"
    name=$(deploy_name "$track")

    replicas="1"
    service_enabled="true"
    postgres_enabled="$POSTGRES_ENABLED"

    # if track is different from stable,
    # re-use all attached resources
    if [[ "$track" != "stable" ]]; then
      service_enabled="false"
      postgres_enabled="false"
    fi

    replicas=$(get_replicas "$track" "$percentage")

    if [[ "$CI_PROJECT_VISIBILITY" != "public" ]]; then
      secret_name='gitlab-registry'
    else
      secret_name=''
    fi

    create_application_secret "$track"

    env_slug=$(echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]')
    eval env_ADDITIONAL_HOSTS=\$${env_slug}_ADDITIONAL_HOSTS
    if [ -n "$env_ADDITIONAL_HOSTS" ]; then
      additional_hosts="{$env_ADDITIONAL_HOSTS}"
    elif [ -n "$ADDITIONAL_HOSTS" ]; then
      additional_hosts="{$ADDITIONAL_HOSTS}"
    fi

    if [[ -n "$DB_INITIALIZE" && -z "$(helm ls -q "^$name$")" ]]; then
      echo "Deploying first release with database initialization..."
      helm upgrade --install \
        --wait \
        --set service.enabled="$service_enabled" \
        --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
        --set image.repository="$CI_APPLICATION_REPOSITORY" \
        --set image.tag="$CI_APPLICATION_TAG" \
        --set image.pullPolicy=IfNotPresent \
        --set image.secrets[0].name="$secret_name" \
        --set application.track="$track" \
        --set application.database_url="$DATABASE_URL" \
        --set application.secretName="$APPLICATION_SECRET_NAME" \
        --set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
        --set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
        --set service.url="$CI_ENVIRONMENT_URL" \
        --set service.additionalHosts="$additional_hosts" \
        --set replicaCount="$replicas" \
        --set postgresql.enabled="$postgres_enabled" \
        --set postgresql.nameOverride="postgres" \
        --set postgresql.postgresUser="$POSTGRES_USER" \
        --set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
        --set postgresql.postgresDatabase="$POSTGRES_DB" \
        --set application.initializeCommand="$DB_INITIALIZE" \
        --namespace="$KUBE_NAMESPACE" \
        "$name" \
        chart/

      echo "Deploying second release..."
      helm upgrade --reuse-values \
        --wait \
        --set application.initializeCommand="" \
        --set application.migrateCommand="$DB_MIGRATE" \
        --namespace="$KUBE_NAMESPACE" \
        "$name" \
        chart/
    else
      echo "Deploying new release..."
      helm upgrade --install \
        --wait \
        --set service.enabled="$service_enabled" \
        --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
        --set image.repository="$CI_APPLICATION_REPOSITORY" \
        --set image.tag="$CI_APPLICATION_TAG" \
        --set image.pullPolicy=IfNotPresent \
        --set image.secrets[0].name="$secret_name" \
        --set application.track="$track" \
        --set application.database_url="$DATABASE_URL" \
        --set application.secretName="$APPLICATION_SECRET_NAME" \
        --set application.secretChecksum="$APPLICATION_SECRET_CHECKSUM" \
        --set service.commonName="le.$KUBE_INGRESS_BASE_DOMAIN" \
        --set service.url="$CI_ENVIRONMENT_URL" \
        --set service.additionalHosts="$additional_hosts" \
        --set replicaCount="$replicas" \
        --set postgresql.enabled="$postgres_enabled" \
        --set postgresql.nameOverride="postgres" \
        --set postgresql.postgresUser="$POSTGRES_USER" \
        --set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
        --set postgresql.postgresDatabase="$POSTGRES_DB" \
        --set application.migrateCommand="$DB_MIGRATE" \
        --namespace="$KUBE_NAMESPACE" \
        "$name" \
        chart/
    fi

    kubectl rollout status -n "$KUBE_NAMESPACE" -w "deployment/$name"
  }
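
  # Usage, as called from the job scripts above: `deploy` releases the stable track
  # at full scale, `deploy canary` adds a canary track, and `deploy rollout 25`
  # releases a rollout track sized to 25% of the configured replicas.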

  function scale() {
    track="${1-stable}"
    percentage="${2-100}"
    name=$(deploy_name "$track")

    replicas=$(get_replicas "$track" "$percentage")

    if [[ -n "$(helm ls -q "^$name$")" ]]; then
      helm upgrade --reuse-values \
        --wait \
        --set replicaCount="$replicas" \
        --namespace="$KUBE_NAMESPACE" \
        "$name" \
        chart/
    fi
  }

  function install_dependencies() {
    apk add -U openssl curl tar gzip bash ca-certificates git
    curl -L -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
    curl -L -O https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk
    apk add glibc-2.28-r0.apk
    rm glibc-2.28-r0.apk

    curl "https://kubernetes-helm.storage.googleapis.com/helm-v${HELM_VERSION}-linux-amd64.tar.gz" | tar zx
    mv linux-amd64/helm /usr/bin/
    mv linux-amd64/tiller /usr/bin/
    helm version --client
    tiller -version

    curl -L -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
    chmod +x /usr/bin/kubectl
    kubectl version --client
  }

  function setup_docker() {
    if ! docker info &>/dev/null; then
      if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
        export DOCKER_HOST='tcp://localhost:2375'
      fi
    fi
  }

  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }

  function download_chart() {
    if [[ ! -d chart ]]; then
      auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
      auto_chart_name=$(basename $auto_chart)
      auto_chart_name=${auto_chart_name%.tgz}
      auto_chart_name=${auto_chart_name%.tar.gz}
    else
      auto_chart="chart"
      auto_chart_name="chart"
    fi

    helm init --client-only
    helm repo add gitlab ${AUTO_DEVOPS_CHART_REPOSITORY:-https://charts.gitlab.io}
    if [[ ! -d "$auto_chart" ]]; then
      helm fetch ${auto_chart} --untar
    fi
    if [ "$auto_chart_name" != "chart" ]; then
      mv ${auto_chart_name} chart
    fi

    helm dependency update chart/
    helm dependency build chart/
  }

  function ensure_namespace() {
    kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
  }

  # Function to ensure backwards compatibility with AUTO_DEVOPS_DOMAIN
  function ensure_kube_ingress_base_domain() {
    if [ -z ${KUBE_INGRESS_BASE_DOMAIN+x} ] && [ -n "$AUTO_DEVOPS_DOMAIN" ] ; then
      export KUBE_INGRESS_BASE_DOMAIN=$AUTO_DEVOPS_DOMAIN
    fi
  }

  function check_kube_domain() {
    ensure_kube_ingress_base_domain

    if [ -z ${KUBE_INGRESS_BASE_DOMAIN+x} ]; then
      echo "In order to deploy or use Review Apps,"
      echo "AUTO_DEVOPS_DOMAIN or KUBE_INGRESS_BASE_DOMAIN variables must be set"
      echo "From 11.8, you can set KUBE_INGRESS_BASE_DOMAIN in cluster settings"
      echo "or by defining a variable at group or project level."
      echo "You can also manually add it in .gitlab-ci.yml"
      echo "AUTO_DEVOPS_DOMAIN support will be dropped in 12.0"
      false
    else
      true
    fi
  }

  function build() {
    registry_login

    if [[ -f Dockerfile ]]; then
      echo "Building Dockerfile-based application..."
      docker build \
        --build-arg HTTP_PROXY="$HTTP_PROXY" \
        --build-arg http_proxy="$http_proxy" \
        --build-arg HTTPS_PROXY="$HTTPS_PROXY" \
        --build-arg https_proxy="$https_proxy" \
        --build-arg FTP_PROXY="$FTP_PROXY" \
        --build-arg ftp_proxy="$ftp_proxy" \
        --build-arg NO_PROXY="$NO_PROXY" \
        --build-arg no_proxy="$no_proxy" \
        -t "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" .
    else
      echo "Building Heroku-based application using gliderlabs/herokuish docker image..."
      docker run -i \
        -e BUILDPACK_URL \
        -e HTTP_PROXY \
        -e http_proxy \
        -e HTTPS_PROXY \
        -e https_proxy \
        -e FTP_PROXY \
        -e ftp_proxy \
        -e NO_PROXY \
        -e no_proxy \
        --name="$CI_CONTAINER_NAME" -v "$(pwd):/tmp/app:ro" gliderlabs/herokuish /bin/herokuish buildpack build
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""

      echo "Configuring $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG docker image..."
      docker create --expose 5000 --env PORT=5000 --name="$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" /bin/herokuish procfile start web
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""
    fi

    echo "Pushing to GitLab Container Registry..."
    docker push "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
    echo ""
  }

  function initialize_tiller() {
    echo "Checking Tiller..."

    export HELM_HOST="localhost:44134"
    tiller -listen ${HELM_HOST} -alsologtostderr > /dev/null 2>&1 &
    echo "Tiller is listening on ${HELM_HOST}"

    if ! helm version --debug; then
      echo "Failed to init Tiller."
      return 1
    fi
    echo ""
  }

  function create_secret() {
    echo "Create secret..."
    if [[ "$CI_PROJECT_VISIBILITY" == "public" ]]; then
      return
    fi

    kubectl create secret -n "$KUBE_NAMESPACE" \
      docker-registry gitlab-registry \
      --docker-server="$CI_REGISTRY" \
      --docker-username="${CI_DEPLOY_USER:-$CI_REGISTRY_USER}" \
      --docker-password="${CI_DEPLOY_PASSWORD:-$CI_REGISTRY_PASSWORD}" \
      --docker-email="$GITLAB_USER_EMAIL" \
      -o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
  }

  function dast() {
    export CI_ENVIRONMENT_URL=$(cat environment_url.txt)

    mkdir /zap/wrk/
    /zap/zap-baseline.py -J gl-dast-report.json -t "$CI_ENVIRONMENT_URL" || true
    cp /zap/wrk/gl-dast-report.json .
  }

  function performance() {
    export CI_ENVIRONMENT_URL=$(cat environment_url.txt)

    mkdir gitlab-exporter
    wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-5/index.js

    mkdir sitespeed-results

    if [ -f .gitlab-urls.txt ]
    then
      sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
    else
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
    fi

    mv sitespeed-results/data/performance.json performance.json
  }

  function persist_environment_url() {
    echo $CI_ENVIRONMENT_URL > environment_url.txt
  }

  function delete() {
    track="${1-stable}"
    name=$(deploy_name "$track")

    if [[ -n "$(helm ls -q "^$name$")" ]]; then
      helm delete --purge "$name"
    fi

    secret_name=$(application_secret_name "$track")
    kubectl delete secret --ignore-not-found -n "$KUBE_NAMESPACE" "$secret_name"
  }

before_script:
  - *auto_devops