Skip to content

Instantly share code, notes, and snippets.

Show Gist options
Save josephsindel/a396f424b01dbc5932cfcad8b9876b3b to your computer and use it in GitHub Desktop.
pipeline {
agent any
environment {
    // Highest schema version already applied to the ringdna MySQL database.
    DB_CURRENT = sh(returnStdout: true, script: '''mysql -ss -e 'select max(version) from schema_version' ringdna''')
    // One migration version number per line, derived from the deploy scripts
    // on disk (file names look like V<version>_<name>; cut strips the 'V').
    DB_DEPLOYS = sh(returnStdout: true, script: '''ls /home/ubuntu/ringdna/server/app/db/deploy | cut -d '_' -f1 | cut -c2-''')
    // Unique id for this deployment; names the launch configuration and ASG.
    DID = "${JOB_NAME}-${BUILD_ID}"
    ENV = "${JOB_NAME}"
    IMAGE_ID = 'ami-04b45e6b88ad390ad'
    INSTANCES = ""
    // Previous deployment's auto scaling group name.
    // Fix: the original passed --region us-east-1 twice in this command.
    PD = sh(returnStdout: true, script: '''aws autoscaling describe-auto-scaling-groups --region us-east-1 | jq -r '.AutoScalingGroups[].AutoScalingGroupName' | grep 'qa-api' ''')
    // Instance ids currently running in the previous deployment's ASG.
    PI = sh(returnStdout: true, script: '''aws autoscaling describe-auto-scaling-groups --region us-east-1 --auto-scaling-group-names $PD | jq -r '.AutoScalingGroups[].Instances[].InstanceId' ''')
    PROFILE = "qa-play1-role"
    REGION = "us-east-1"
    SG = "sg-eb4a0c97"
    SUBNETS = "subnet-44e3b61f,subnet-7ad98757"
    // EC2 user-data (cloud-boothook) run on instance boot: downloads this
    // build's artifact from S3 and unpacks it into /home/ubuntu/api.
    // NOTE(review): it moves /home/ubuntu/application.conf.tmpl and chmods
    // /home/ubuntu/api-envvars.sh — presumably baked into the AMI; confirm.
    // The ${JOB_NAME}/${BUILD_ID}/$ENV references are interpolated by Groovy
    // (double-quoted GString) before the string reaches cloud-init.
    DATA = """#cloud-boothook
#!/bin/bash
set -x
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
echo BEGIN
date '+%Y-%m-%d %H:%M:%S'
sudo aws s3 cp s3://ringdna-artifacts/${JOB_NAME}-${BUILD_ID}.zip /home/ubuntu
echo "PLAY_ID=$ENV \nNODE_TYPE=server"| cut -d'-' -f 1 | sudo tee /etc/default/play
sudo unzip /home/ubuntu/${JOB_NAME}-${BUILD_ID}.zip -d /home/ubuntu/api
sudo chown -R ubuntu /home/ubuntu/api/
sudo chgrp -R ubuntu /home/ubuntu/api/
sudo mv /home/ubuntu/application.conf.tmpl /home/ubuntu/api/api-0.1.0-SNAPSHOT/bin/
sudo chmod +x /home/ubuntu/api-envvars.sh
#sudo systemctl start api.service
"""
    // Block device mapping JSON passed verbatim to the aws CLI: one
    // ephemeral device plus an 8 GiB gp2 root volume.
    BDM = '''
[
{"DeviceName": "/dev/sdb","VirtualName": "ephemeral0"},
{"DeviceName": "/dev/sda1","Ebs": {"DeleteOnTermination": true,"VolumeSize": 8,"VolumeType": "gp2"}}
]
'''
}
// Global pipeline options: colourised console output, and a default AWS
// region applied to withAWS-aware steps.
options {
ansiColor('xterm')
withAWS(region:'us-east-1')
}
stages {
// Post a yellow "build started" notification to Slack with a link
// back to this build.
stage('Notify Started') {
steps {
slackSend (color: '#FFFF00', message: "STARTED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})")
}
}
// Static analysis: run the scanner from the Jenkins tool installation
// named 'sonarqube', with server URL/credentials injected by
// withSonarQubeEnv for the server configured under the same name.
stage('Sonarqube') {
environment {
scannerHome = tool 'sonarqube'
}
steps {
withSonarQubeEnv('sonarqube') {
sh "${scannerHome}/bin/sonar-scanner"
}
}
}
stage('Build') {
    steps {
        // Compile and package the Play server distribution with sbt.
        // All three commands run in one sbt launch so the JVM/sbt
        // start-up cost is paid once instead of three times.
        sh '''
        echo $PWD
        cd $PWD/packages/server
        sbt sbtVersion clean dist
        '''
    }
}
stage('Rename Artifact') {
    steps {
        // Give the sbt dist zip a unique, job-scoped name so the S3 upload
        // and the instance user-data download agree on the object key.
        // (Dropped the original's no-op `cd $PWD` — it changed nothing.)
        sh 'mv $PWD/packages/server/target/universal/api-0.1.0-SNAPSHOT.zip $PWD/${JOB_NAME}-${BUILD_ID}.zip'
    }
}
// Upload the renamed artifact zip to the ringdna-artifacts bucket,
// where the new instance's user-data script fetches it.
// NOTE(review): the stage name says "When Tagged" but there is no
// `when` condition, so this uploads on every build — confirm intent.
stage('Upload Artifact When Tagged') {
steps {
s3Upload acl: 'Private',
bucket: 'ringdna-artifacts',
includePathPattern: '*.zip'
}
}
// Register the launch configuration for this deployment ($DID-api).
// The sh step is a Groovy single-quoted string, so every $VAR below is
// expanded by the shell from the environment{} exports, not by Groovy —
// including the multi-line $DATA (user-data) and $BDM (block device JSON),
// which is why those two are double-quoted.
// NOTE(review): key name, instance type and AMI are hard-coded for QA.
stage('Create Launch Configuration') {
steps {
sh 'aws autoscaling create-launch-configuration \
--launch-configuration-name $DID-api \
--region ${REGION} \
--key-name qa-20170803 \
--security-groups $SG \
--user-data "$DATA" \
--image-id $IMAGE_ID \
--instance-type t2.small \
--iam-instance-profile $PROFILE \
--block-device-mappings "$BDM" '
}
}
stage('Creating Auto Scaling Group') {
    steps {
        // Create the new deployment's ASG (starts at 0 instances; a later
        // stage scales it to 1) and turn on 1-minute CloudWatch metrics.
        // Fix: $TG was never set anywhere active — the lookup stage was
        // commented out, and a variable assigned in a separate sh step would
        // not survive into this one anyway. Resolve the target group ARN in
        // the same shell that consumes it.
        sh '''
        TG=$(aws elbv2 describe-target-groups \
            --region $REGION \
            --names qa-api \
            --output text \
            --query "TargetGroups[0].TargetGroupArn")
        aws autoscaling create-auto-scaling-group \
            --region $REGION \
            --auto-scaling-group-name $DID-api \
            --launch-configuration-name $DID-api \
            --default-cooldown 1 \
            --target-group-arns $TG \
            --health-check-type ELB \
            --min-size 0 \
            --max-size 1 \
            --desired-capacity 0 \
            --vpc-zone-identifier "$SUBNETS" \
            --health-check-grace-period 600 \
            --tags ResourceId=$DID-api,ResourceType=auto-scaling-group,Key=Role,Value=qa \
        && aws autoscaling enable-metrics-collection \
            --auto-scaling-group-name "$DID-api" \
            --region $REGION \
            --granularity 1Minute
        '''
    }
}
// Bring the new ASG from 0 to 1 instance; the instance boots with the
// $DATA cloud-init script defined in environment{}, which installs the
// artifact uploaded earlier in this build.
stage('Increasing Desired Capacity of Current Deployment Auto Scaling group') {
steps {
sh 'aws autoscaling set-desired-capacity \
--region ${REGION} \
--auto-scaling-group-name "$DID-api" \
--desired-capacity 1'
}
}
stage('Testing Migration') {
    steps {
        // Run every DB migration script whose version number is greater than
        // the version currently recorded in the schema_version table.
        script {
            def current = DB_CURRENT.trim().toInteger()
            // DB_DEPLOYS is one version number per line (sh returnStdout
            // includes a trailing newline) — trim and drop blank tokens.
            def pending = DB_DEPLOYS.tokenize("\n")*.trim().findAll { it && it.toInteger() > current }
            if (pending) {
                pending.each { version ->
                    // Scripts are named V<version>_*; the shell glob resolves
                    // the full file name.
                    sh "/home/ubuntu/ringdna/server/app/db/deploy/V${version}*.sh"
                }
            } else {
                // Fix: the original printed this once per already-applied
                // script, even on runs that did deploy migrations.
                print 'no db migrations to deploy'
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////// ///////////////////////////////
//////////////////////////// DO THIS STUFF AFTER CUTOVER ///////////////////////////////
//////////////////////////// ///////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
// After cutover: scale the previous deployment's ASG ($PD, discovered in
// environment{}) down to zero so its instances stop serving traffic.
stage('Decreasing Desired Capacity of Previous Deployment Auto Scaling group') {
steps {
sh '''
aws autoscaling set-desired-capacity \
--region ${REGION} \
--auto-scaling-group-name ${PD} \
--desired-capacity 0
'''
}
}
// Log which deployment/instances would be retired after cutover.
// The terminate-instances loop is deliberately left commented out for
// now, so this stage only reports; uncomment once cutover is trusted.
stage('Terminating Instances from Previous Deployment') {
    steps {
        sh '''
        echo 'previous deploy'
        echo ${PD}
        echo 'previous instance'
        echo ${PI}
        echo '*****************'
        echo '*****************'
        #for INSTANCES in ${PI}
        # do
        #  aws ec2 terminate-instances --instance-ids \$INSTANCES --region us-east-1
        #done
        '''
    }
}
/// stage('Locating Target Group') {
/// steps {
/// sh 'TG=$(aws elbv2 describe-target-groups \
/// --region $REGION \
/// --names qa-api \
/// --output text \
/// --query "TargetGroups[0].TargetGroupArn")'
/// }
/// }
///
/// stage('Delete Previous Deployment Autoscaling Group') {
/// steps {
/// sh '''
/// aws autoscaling delete-auto-scaling-group \
/// --auto-scaling-group-name $PD \
/// --region us-east-1 \
/// --force-delete
/// '''
/// }
/// }
/// stage('Delete Previous Deployment Launch Configuration') {
/// steps {
/// sh '''
/// aws autoscaling delete-launch-configuration \
/// --launch-configuration-name $PD \
/// --region us-east-1 \
/// '''
/// }
/// }
// Delete the workspace contents (sources and the local artifact zip)
// to free disk space on the agent.
stage('Clean Workspace') {
steps {
cleanWs()
}
}
}
// Final Slack notification: green on success, red on failure, each with
// a link back to this build.
post {
success {
slackSend (color: '#00FF00', message: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})")
}
failure {
slackSend (color: '#FF0000', message: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})")
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment