FILE # 1
#!groovy

// Jenkins scripted pipeline: build, test, and containerize a gRPC demo server,
// deploy it to DEV, verify it with the sample client, then gate a LIVE rollout
// behind manual approval.

String GIT_VERSION

node {

  def buildEnv
  def devAddress

  stage ('Checkout') {
    deleteDir()
    checkout scm
    // Derive the image tag from the nearest git tag (e.g. "v1.2-3-gabc123").
    GIT_VERSION = sh(
      script: 'git describe --tags',
      returnStdout: true
    ).trim()
  }

  stage ('Build Custom Environment') {
    // Build the sbt tool-chain image from the Dockerfile in ./custom-build-env.
    buildEnv = docker.build("build_env:${GIT_VERSION}", 'custom-build-env')
  }

  buildEnv.inside {

    stage ('Build') {
      sh 'sbt compile'
      sh 'sbt sampleClient/universal:stage'
    }

    stage ('Test') {
      // Server and client test suites are independent, so run them in parallel.
      parallel(
        'Test Server' : {
          sh 'sbt server/test'
        },
        'Test Sample Client' : {
          sh 'sbt sampleClient/test'
        }
      )
    }

    stage ('Prepare Docker Image') {
      sh 'sbt server/docker:stage'
    }
  }

  stage ('Build and Push Docker Image') {
    withCredentials([[$class: "UsernamePasswordMultiBinding", usernameVariable: 'DOCKERHUB_USER', passwordVariable: 'DOCKERHUB_PASS', credentialsId: 'Docker Hub']]) {
      // Pipe the password via stdin so the secret never appears in the host's
      // process list or shell history (docker warns against --password).
      sh 'echo "$DOCKERHUB_PASS" | docker login --username "$DOCKERHUB_USER" --password-stdin'
    }
    def serverImage = docker.build("sambott/grpc-test:${GIT_VERSION}", 'server/target/docker/stage')
    serverImage.push()
    sh 'docker logout'
  }

  stage ('Deploy to DEV') {
    devAddress = deployContainer("sambott/grpc-test:${GIT_VERSION}", 'DEV')
  }

  stage ('Verify Deployment') {
    buildEnv.inside {
      // Smoke-test the fresh DEV deployment with the sample client built above.
      sh "sample-client/target/universal/stage/bin/demo-client ${devAddress}"
    }
  }
}

// Block-style stage (the bare `stage 'name'` step is deprecated). The manual
// gate sits outside any node{} so no executor is held while waiting.
stage ('Deploy to LIVE') {
  timeout(time: 2, unit: 'DAYS') {
    input message: 'Approve deployment to LIVE?'
  }
  node {
    deployContainer("sambott/grpc-test:${GIT_VERSION}", 'LIVE')
  }
}
// Roll `image` out to the grpc-demo deployment in the given kube context and
// return the public hostname of the grpc-demo service's load balancer.
def deployContainer(image, env) {
  // Run kubectl from a pinned tooling image so the agent needs no local install.
  docker.image('lachlanevenson/k8s-kubectl:v1.5.2').inside {
    withCredentials([[$class: "FileBinding", credentialsId: 'KubeConfig', variable: 'KUBE_CONFIG']]) {
      // $KUBE_CONFIG is escaped so the shell (not Groovy) expands it at run time.
      def kubectlCmd = "kubectl --kubeconfig=\$KUBE_CONFIG --context=${env}"
      sh "${kubectlCmd} set image deployment/grpc-demo grpc-demo=${image}"
      // Block until the rollout finishes; a failed rollout fails the build.
      sh "${kubectlCmd} rollout status deployment/grpc-demo"
      return sh(
        script: "${kubectlCmd} get service/grpc-demo -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'",
        returnStdout: true
      ).trim()
    }
  }
}
FILE # 2
// Jenkins scripted pipeline: Maven + Yarn build of the moviemanager app inside
// a JDK 8 container, followed by Docker image build and publish on the host.
node {
  stage('checkout') {
    checkout scm
  }

  // MAVEN_OPTS points the Maven user home at the workspace so the local
  // repository is cached between stages of this build.
  docker.image('openjdk:8').inside('-u root -e MAVEN_OPTS="-Duser.home=./"') {
    stage('check java') {
      sh "java -version"
    }

    stage('clean') {
      // The wrapper script can lose its executable bit on checkout.
      sh "chmod +x mvnw"
      sh "./mvnw clean"
    }

    stage('install tools') {
      // Install pinned node/yarn versions via the frontend-maven-plugin.
      sh "./mvnw com.github.eirslett:frontend-maven-plugin:install-node-and-yarn -DnodeVersion=v6.11.1 -DyarnVersion=v0.27.5"
    }

    stage('yarn install') {
      sh "./mvnw com.github.eirslett:frontend-maven-plugin:yarn"
    }

    stage('backend tests') {
      // Always publish the JUnit report, even when tests fail. The sh step
      // already fails the build on a non-zero exit, so the original
      // catch(err){ throw err } was a no-op and has been removed.
      try {
        sh "./mvnw test"
      } finally {
        junit '**/target/surefire-reports/TEST-*.xml'
      }
    }

    stage('frontend tests') {
      try {
        sh "./mvnw com.github.eirslett:frontend-maven-plugin:yarn -Dfrontend.yarn.arguments=test"
      } finally {
        junit '**/target/test-results/karma/TESTS-*.xml'
      }
    }

    stage('packaging') {
      sh "./mvnw package -Pprod -DskipTests"
      archiveArtifacts artifacts: '**/target/*.war', fingerprint: true
    }

    stage('quality analysis') {
      withSonarQubeEnv('Sonar') {
        sh "./mvnw sonar:sonar"
      }
    }
  }

  def dockerImage
  stage('build docker') {
    // Stage the Dockerfile and the packaged war into target/docker.
    sh "cp -R src/main/docker target/"
    sh "cp target/*.war target/docker/"
    dockerImage = docker.build('moviemanager', 'target/docker')
  }

  stage('publish docker') {
    docker.withRegistry('https://registry.hub.docker.com', 'docker-login') {
      dockerImage.push 'latest'
    }
  }
}
stage 'Trigger e2e tests'

// Retry loop: keep launching the downstream e2e job until it succeeds.
// On master the first failure aborts the build; on other branches a failure
// mails the author and pauses for a manual "retry" decision.
// NOTE(review): `git_branch` and `git_commit` are presumably set earlier in
// this file (outside this chunk) — confirm before relying on them.
waitUntil {
  try {
    // PRs test against the 'pr' chart repo; master tests against 'dev'.
    def chartRepoType = git_branch == "master" ? 'dev' : 'pr'
    build job: 'workflow-chart-e2e', parameters: [
      [$class: 'StringParameterValue', name: 'WORKFLOW_CLI_SHA', value: git_commit],
      [$class: 'StringParameterValue', name: 'ACTUAL_COMMIT', value: git_commit],
      [$class: 'StringParameterValue', name: 'COMPONENT_REPO', value: 'workflow-cli'],
      [$class: 'StringParameterValue', name: 'CHART_REPO_TYPE', value: chartRepoType],
      [$class: 'StringParameterValue', name: 'UPSTREAM_SLACK_CHANNEL', value: '#controller']]
    true
  } catch(error) {
    // Master must never swallow an e2e failure.
    if (git_branch == "master") {
      throw error
    }
    // NOTE(review): `linux` looks like it should be a node-label variable
    // defined elsewhere in this file — confirm; otherwise it should read
    // node('linux').
    node(linux) {
      withCredentials([[$class: 'StringBinding', credentialsId: '8a727911-596f-4057-97c2-b9e23de5268d', variable: 'SLACKEMAIL']]) {
        // HTML fixed: the two closing </p> tags had no opening <p>, and the
        // <body> element was never closed.
        mail body: """<!DOCTYPE html>
<html>
<head>
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type' />
</head>
<body>
<div>Author: ${env.CHANGE_AUTHOR}<br/>
Branch: ${env.BRANCH_NAME}<br/>
Commit: ${env.CHANGE_TITLE}<br/>
<p><a href="${env.BUILD_URL}console">Click here</a> to view logs.</p>
<p><a href="${env.BUILD_URL}input/">Click here</a> to restart e2e.</p>
</div>
</body>
</html>
""", from: 'jenkins@ci.deis.io', subject: 'Workflow CLI E2E Test Failure', to: env.SLACKEMAIL, mimeType: 'text/html'
      }
      // Pause until a human chooses to retry; aborting fails the build.
      input "Retry the e2e tests?"
    }
    false
  }
}
// Kubernetes-plugin pipeline: build a Hugo site, validate it, push a Docker
// image, and roll it out with kubectl — one container per tool.
// FIX: `volumes:` is a podTemplate-level parameter, not a containerTemplate
// one, so the kube-config secretVolume originally declared inside the kubectl
// containerTemplate was never mounted; it now lives in the pod's volume list.
podTemplate(label: 'pod-hugo-app', containers: [
    containerTemplate(name: 'hugo', image: 'smesch/hugo', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'html-proofer', image: 'smesch/html-proofer', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'kubectl', image: 'smesch/kubectl', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'docker', image: 'docker', ttyEnabled: true, command: 'cat',
      envVars: [containerEnvVar(key: 'DOCKER_CONFIG', value: '/tmp/'),])],
  volumes: [secretVolume(secretName: 'docker-config', mountPath: '/tmp'),
    secretVolume(secretName: 'kube-config', mountPath: '/root/.kube'),
    hostPathVolume(hostPath: '/var/run/docker.sock', mountPath: '/var/run/docker.sock')
  ]) {

  node('pod-hugo-app') {

    def DOCKER_HUB_ACCOUNT = 'smesch'
    def DOCKER_IMAGE_NAME = 'hugo-app-jenkins'
    def K8S_DEPLOYMENT_NAME = 'hugo-app'

    stage('Clone Hugo App Repository') {
      checkout scm

      container('hugo') {
        stage('Build Hugo Site') {
          sh ("hugo --uglyURLs")
        }
      }

      container('html-proofer') {
        stage('Validate HTML') {
          sh ("htmlproofer public --internal-domains ${env.JOB_NAME} --external_only --only-4xx")
        }
      }

      container('docker') {
        stage('Docker Build & Push Current & Latest Versions') {
          // Tag with the build number and also move the :latest tag.
          sh ("docker build -t ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER} .")
          sh ("docker push ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER}")
          sh ("docker tag ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER} ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:latest")
          sh ("docker push ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:latest")
        }
      }

      container('kubectl') {
        stage('Deploy New Build To Kubernetes') {
          // Rolling update: point the deployment at the freshly pushed tag.
          sh ("kubectl set image deployment/${K8S_DEPLOYMENT_NAME} ${K8S_DEPLOYMENT_NAME}=${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER}")
        }
      }
    }
  }
}
PROBLEM
How do the kubectl drain and cordon commands work?
SOLUTION
The given node will be marked unschedulable to prevent new pods from arriving. 'drain' evicts the pods if the API server supports eviction (http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use a normal DELETE to delete the pods. 'drain' evicts or deletes all pods except mirror pods (which cannot be deleted through the API server).
If there are DaemonSet-managed pods, drain will not proceed without --ignore-daemonsets, and regardless it will not delete any DaemonSet-managed pods, because those pods would be immediately replaced by the DaemonSet controller, which ignores unschedulable markings.
If there are any pods that are neither mirror pods nor managed by ReplicationController, ReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete any pods unless you use --force.
'drain' waits for graceful termination. You should not operate on the machine until the command completes.
When you are ready to put the node back into service, use kubectl uncordon, which will make the node schedulable again.