FILE # 1
#!groovy
// Scripted pipeline: build, test, dockerize and deploy a gRPC demo server.
String GIT_VERSION

node {
  def buildEnv
  def devAddress

  stage ('Checkout') {
    deleteDir()
    checkout scm
    // Use the nearest git tag as the version label for all images below.
    GIT_VERSION = sh (
      script: 'git describe --tags',
      returnStdout: true
    ).trim()
  }

  stage ('Build Custom Environment') {
    // Build the sbt build-environment image from ./custom-build-env.
    buildEnv = docker.build("build_env:${GIT_VERSION}", 'custom-build-env')
  }

  buildEnv.inside {
    stage ('Build') {
      sh 'sbt compile'
      sh 'sbt sampleClient/universal:stage'
    }

    stage ('Test') {
      // Server and client suites are independent; run them concurrently.
      parallel (
        'Test Server' : {
          sh 'sbt server/test'
        },
        'Test Sample Client' : {
          sh 'sbt sampleClient/test'
        }
      )
    }

    stage ('Prepare Docker Image') {
      sh 'sbt server/docker:stage'
    }
  }

  stage ('Build and Push Docker Image') {
    withCredentials([[$class: "UsernamePasswordMultiBinding", usernameVariable: 'DOCKERHUB_USER', passwordVariable: 'DOCKERHUB_PASS', credentialsId: 'Docker Hub']]) {
      // Single-quoted so the shell (not Groovy) expands the secrets,
      // keeping them out of interpolated pipeline strings.
      sh 'docker login --username $DOCKERHUB_USER --password $DOCKERHUB_PASS'
    }
    try {
      def serverImage = docker.build("sambott/grpc-test:${GIT_VERSION}", 'server/target/docker/stage')
      serverImage.push()
    } finally {
      // Always drop the registry session, even if the build or push fails.
      sh 'docker logout'
    }
  }

  stage ('Deploy to DEV') {
    devAddress = deployContainer("sambott/grpc-test:${GIT_VERSION}", 'DEV')
  }

  stage ('Verify Deployment') {
    buildEnv.inside {
      sh "sample-client/target/universal/stage/bin/demo-client ${devAddress}"
    }
  }
}

stage ('Deploy to LIVE') {
  // Approval gate deliberately sits OUTSIDE any node block so no executor
  // is held while waiting (up to 2 days) for a human decision.
  timeout(time:2, unit:'DAYS') {
    input message:'Approve deployment to LIVE?'
  }
  node {
    deployContainer("sambott/grpc-test:${GIT_VERSION}", 'LIVE')
  }
}
// Roll the grpc-demo Kubernetes deployment over to `image` in the given
// kubeconfig context and return the service's external hostname.
//
// image       - fully-qualified Docker image reference to deploy
// kubeContext - kubectl context name (e.g. 'DEV', 'LIVE'); renamed from
//               `env`, which shadowed the Jenkins global `env` object
// returns     - external load-balancer hostname of service/grpc-demo
def deployContainer(image, kubeContext) {
  docker.image('lachlanevenson/k8s-kubectl:v1.5.2').inside {
    withCredentials([[$class: "FileBinding", credentialsId: 'KubeConfig', variable: 'KUBE_CONFIG']]) {
      // \$KUBE_CONFIG is escaped so the shell expands the secret file path
      // at run time instead of Groovy interpolating it into the log.
      def kubectl = "kubectl --kubeconfig=\$KUBE_CONFIG --context=${kubeContext}"
      sh "${kubectl} set image deployment/grpc-demo grpc-demo=${image}"
      // Block until the rollout completes (fails the build on rollout error).
      sh "${kubectl} rollout status deployment/grpc-demo"
      return sh (
        script: "${kubectl} get service/grpc-demo -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'",
        returnStdout: true
      ).trim()
    }
  }
}
FILE # 2
// Scripted pipeline: Maven/Yarn build, tests with report publishing,
// packaging, Sonar analysis, then Docker image build and push.
node {
    stage('checkout') {
        checkout scm
    }
    // Run all Maven steps inside a JDK 8 container; MAVEN_OPTS pins the
    // user home to the workspace so the local repository cache is reused.
    docker.image('openjdk:8').inside('-u root -e MAVEN_OPTS="-Duser.home=./"') {
        stage('check java') {
            sh "java -version"
        }
        stage('clean') {
            // The wrapper script may lose its executable bit on checkout.
            sh "chmod +x mvnw"
            sh "./mvnw clean"
        }
        stage('install tools') {
            sh "./mvnw com.github.eirslett:frontend-maven-plugin:install-node-and-yarn -DnodeVersion=v6.11.1 -DyarnVersion=v0.27.5"
        }
        stage('yarn install') {
            sh "./mvnw com.github.eirslett:frontend-maven-plugin:yarn"
        }
        stage('backend tests') {
            // try/finally (no redundant catch/rethrow): a failing `sh`
            // still fails the build, but the JUnit reports are always
            // published first.
            try {
                sh "./mvnw test"
            } finally {
                junit '**/target/surefire-reports/TEST-*.xml'
            }
        }
        stage('frontend tests') {
            try {
                sh "./mvnw com.github.eirslett:frontend-maven-plugin:yarn -Dfrontend.yarn.arguments=test"
            } finally {
                junit '**/target/test-results/karma/TESTS-*.xml'
            }
        }
        stage('packaging') {
            sh "./mvnw package -Pprod -DskipTests"
            archiveArtifacts artifacts: '**/target/*.war', fingerprint: true
        }
        stage('quality analysis') {
            withSonarQubeEnv('Sonar') {
                sh "./mvnw sonar:sonar"
            }
        }
    }

    def dockerImage
    stage('build docker') {
        // Stage the Dockerfile next to the built artifact for the build context.
        sh "cp -R src/main/docker target/"
        sh "cp target/*.war target/docker/"
        dockerImage = docker.build('moviemanager', 'target/docker')
    }
    stage('publish docker') {
        docker.withRegistry('https://registry.hub.docker.com', 'docker-login') {
            dockerImage.push 'latest'
        }
    }
}
// Trigger the downstream e2e job and keep retrying until it succeeds.
// NOTE(review): `git_branch`, `git_commit` and `linux` are assumed to be
// defined earlier in the full Jenkinsfile — confirm before reusing this chunk.
stage ('Trigger e2e tests') {
  waitUntil {
    try {
      // PR builds exercise the 'pr' chart repo; master uses 'dev'.
      def chartRepoType = git_branch == "master" ? 'dev' : 'pr'
      build job: 'workflow-chart-e2e', parameters: [
        [$class: 'StringParameterValue', name: 'WORKFLOW_CLI_SHA', value: git_commit],
        [$class: 'StringParameterValue', name: 'ACTUAL_COMMIT', value: git_commit],
        [$class: 'StringParameterValue', name: 'COMPONENT_REPO', value: 'workflow-cli'],
        [$class: 'StringParameterValue', name: 'CHART_REPO_TYPE', value: chartRepoType],
        [$class: 'StringParameterValue', name: 'UPSTREAM_SLACK_CHANNEL', value: '#controller']]
      // Success: stop the waitUntil loop.
      true
    } catch(error) {
      // A master failure is fatal; on PR branches, notify and offer a retry.
      if (git_branch == "master") {
        throw error
      }
      node(linux) {
        withCredentials([[$class: 'StringBinding', credentialsId: '8a727911-596f-4057-97c2-b9e23de5268d', variable: 'SLACKEMAIL']]) {
          mail body: """<!DOCTYPE html>
<html>
<head>
<meta content='text/html; charset=UTF-8' http-equiv='Content-Type' />
</head>
<body>
<div>Author: ${env.CHANGE_AUTHOR}<br/>
Branch: ${env.BRANCH_NAME}<br/>
Commit: ${env.CHANGE_TITLE}<br/>
<a href="${env.BUILD_URL}console">Click here</a> to view logs.<br/>
<a href="${env.BUILD_URL}input/">Click here</a> to restart e2e.<br/>
</div>
</body>
</html>
""", from: 'jenkins@ci.deis.io', subject: 'Workflow CLI E2E Test Failure', to: env.SLACKEMAIL, mimeType: 'text/html'
        }
      }
      // The human gate sits OUTSIDE the node block so no executor is held
      // while waiting for someone to click "retry".
      input "Retry the e2e tests?"
      // Failure handled: loop again.
      false
    }
  }
}
podTemplate(label: 'pod-hugo-app', containers: [
    containerTemplate(name: 'hugo', image: 'smesch/hugo', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'html-proofer', image: 'smesch/html-proofer', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'kubectl', image: 'smesch/kubectl', ttyEnabled: true, command: 'cat'),
    containerTemplate(name: 'docker', image: 'docker', ttyEnabled: true, command: 'cat',
        envVars: [containerEnvVar(key: 'DOCKER_CONFIG', value: '/tmp/'),])],
  // Volumes are a pod-level setting. The kube-config secret used to be
  // passed inside the kubectl containerTemplate, where the Kubernetes
  // plugin ignores it and the mount never happened.
  volumes: [secretVolume(secretName: 'kube-config', mountPath: '/root/.kube'),
    secretVolume(secretName: 'docker-config', mountPath: '/tmp'),
    hostPathVolume(hostPath: '/var/run/docker.sock', mountPath: '/var/run/docker.sock')
  ]) {

  node('pod-hugo-app') {

    def DOCKER_HUB_ACCOUNT = 'smesch'
    def DOCKER_IMAGE_NAME = 'hugo-app-jenkins'
    def K8S_DEPLOYMENT_NAME = 'hugo-app'

    // The checkout stage now closes after `checkout scm`; previously every
    // later stage was accidentally nested inside it.
    stage('Clone Hugo App Repository') {
      checkout scm
    }

    container('hugo') {
      stage('Build Hugo Site') {
        sh ("hugo --uglyURLs")
      }
    }

    container('html-proofer') {
      stage('Validate HTML') {
        sh ("htmlproofer public --internal-domains ${env.JOB_NAME} --external_only --only-4xx")
      }
    }

    container('docker') {
      stage('Docker Build & Push Current & Latest Versions') {
        sh ("docker build -t ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER} .")
        sh ("docker push ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER}")
        // Re-tag the just-pushed build as :latest and push that too.
        sh ("docker tag ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER} ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:latest")
        sh ("docker push ${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:latest")
      }
    }

    container('kubectl') {
      stage('Deploy New Build To Kubernetes') {
        sh ("kubectl set image deployment/${K8S_DEPLOYMENT_NAME} ${K8S_DEPLOYMENT_NAME}=${DOCKER_HUB_ACCOUNT}/${DOCKER_IMAGE_NAME}:${env.BUILD_NUMBER}")
      }
    }

  }
}
PROBLEM
Does there exist any solution for this? For example, we would like to set up alerts if there is a pod in the cluster which doesn't have any limits set. We want to allow developers to manage their pipelines however they like, but we still want a notification whenever a pod is missing limits ...
so, that we can chase that team and ask them to update their deployments
SOLUTION
I guess we could add a validation step in a release; or in a PR on an environment to reject PRs which are missing, say, resources
validating all charts in an env have limits sounds a nice option
I’m not aware of anything existing, but it might be nice to check in CI whether limits are missing and fail the CI checks. I'm not sure if we can add a policy to cover that in SonarQube, for example.
or maybe a custom controller that analyses pods in a preview env
if folks were using GitOps on an environment, it could validate the charts in the CI build
via either a helm pre-install hook https://github.com/kubernetes/helm/blob/master/docs/charts_hooks.md#the-available-hooks or just generating templates of the charts and validating the output YAML for all
Deployments
and assert they have limits, etc. I wonder if we could extend
helm lint
to add more validations? https://github.com/kubernetes/helm/blob/master/docs/helm/helm_lint.md If anyone can
kubectl apply
at any time then a controller is your only hope. A GitOps validation check on PRs would be cooler though & give better feedback & could automate fixing bad PRs over time by adding better default limits automatically