# Source: GitHub Gist yihyang/5616ec1cac02ee5cb81217e3d14e75f3
# "Instantly share code, notes, and snippets."
# Save yihyang/5616ec1cac02ee5cb81217e3d14e75f3 to your computer and use it in GitHub Desktop.
# Clean up Unused and Orphaned Persistent Disks
# Enable APIs and clone repository
gcloud services enable cloudscheduler.googleapis.com
git clone https://github.com/GoogleCloudPlatform/gcf-automated-resource-cleanup.git && cd gcf-automated-resource-cleanup/
# Split assignment from export so a failed substitution is not masked (SC2155)
PROJECT_ID=$(gcloud config list --format 'value(core.project)' 2>/dev/null)
export PROJECT_ID
WORKDIR=$(pwd)

# Create two persistent disks: one that will be attached then detached
# (orphaned) and one that is never attached (unused)
cd "$WORKDIR/unattached-pd"
export ORPHANED_DISK=orphaned-disk
export UNUSED_DISK=unused-disk
gcloud beta compute disks create "$ORPHANED_DISK" --project="$PROJECT_ID" --type=pd-standard --size=500GB --zone=us-central1-a
gcloud beta compute disks create "$UNUSED_DISK" --project="$PROJECT_ID" --type=pd-standard --size=500GB --zone=us-central1-a
gcloud compute disks list

# Create a VM with the orphaned disk attached as a non-boot disk, then detach
# it so the disk carries attach/detach timestamps the cleanup function reads
gcloud compute instances create disk-instance \
  --zone=us-central1-a \
  --machine-type=n1-standard-1 \
  --disk=name="$ORPHANED_DISK",device-name="$ORPHANED_DISK",mode=rw,boot=no
gcloud compute disks describe "$ORPHANED_DISK" --zone=us-central1-a --format=json | jq
gcloud compute instances detach-disk disk-instance --device-name="$ORPHANED_DISK" --zone=us-central1-a
gcloud compute disks describe "$ORPHANED_DISK" --zone=us-central1-a --format=json | jq

# Review the Cloud Function code (grep reads the file directly; no cat needed)
grep "(request)" -A 12 "$WORKDIR/unattached-pd/main.py"
grep "handle never" -A 11 "$WORKDIR/unattached-pd/main.py"
grep "handle detached" -A 32 "$WORKDIR/unattached-pd/main.py"
# NOTE(review): the next line is a Python snippet from main.py (the lab has you
# set your project ID inside that file); it is not a valid shell command, so it
# stays commented out here.
# project = 'qwiklabs-gcp-b5dbc291a25a68db'

# Deploy the Cloud Function
gcloud functions deploy delete_unattached_pds --trigger-http --runtime=python37
FUNCTION_URL=$(gcloud functions describe delete_unattached_pds --format=json | jq -r '.httpsTrigger.url')
export FUNCTION_URL

# Schedule and test the Cloud Function
# NOTE(review): "* 2 * * *" fires every minute from 02:00 to 02:59;
# use "0 2 * * *" if a single daily run is intended.
gcloud scheduler jobs create http unattached-pd-job \
  --schedule="* 2 * * *" \
  --uri="$FUNCTION_URL"
gcloud scheduler jobs run unattached-pd-job
# Enable APIs and clone repository
gcloud services enable cloudscheduler.googleapis.com
git clone https://github.com/GoogleCloudPlatform/gcf-automated-resource-cleanup.git && cd gcf-automated-resource-cleanup/
# Split assignment from export so a failed substitution is not masked (SC2155)
PROJECT_ID=$(gcloud config list --format 'value(core.project)' 2>/dev/null)
export PROJECT_ID
export region=us-central1
WORKDIR=$(pwd)

# Create two static IP addresses: one bound to a VM and one left unused
cd "$WORKDIR/unused-ip"
export USED_IP=used-ip-address
export UNUSED_IP=unused-ip-address
gcloud compute addresses create "$USED_IP" --project="$PROJECT_ID" --region=us-central1
gcloud compute addresses create "$UNUSED_IP" --project="$PROJECT_ID" --region=us-central1
gcloud compute addresses list --filter="region:(us-central1)"
USED_IP_ADDRESS=$(gcloud compute addresses describe "$USED_IP" --region=us-central1 --format=json | jq -r '.address')
export USED_IP_ADDRESS

# Create a VM bound to the reserved (used) address
gcloud compute instances create static-ip-instance \
  --zone=us-central1-a \
  --machine-type=n1-standard-1 \
  --subnet=default \
  --address="$USED_IP_ADDRESS"
gcloud compute addresses list --filter="region:(us-central1)"

# Review the Cloud Function code
# Deploy the Cloud Function
gcloud functions deploy unused_ip_function --trigger-http --runtime=nodejs8
FUNCTION_URL=$(gcloud functions describe unused_ip_function --format=json | jq -r '.httpsTrigger.url')
export FUNCTION_URL

# Schedule and test the Cloud Function
gcloud scheduler jobs create http unused-ip-job \
  --schedule="* 2 * * *" \
  --uri="$FUNCTION_URL"
gcloud scheduler jobs run unused-ip-job
gcloud compute addresses list --filter="region:(us-central1)"
# Expected output after the function deletes the unused address
# (pasted terminal output — not an executable command):
#   NAME             ADDRESS/RANGE  TYPE      PURPOSE  NETWORK  REGION       SUBNET  STATUS
#   used-ip-address  104.197.56.87  EXTERNAL                    us-central1          IN_USE
# Create a function
mkdir gcf_hello_world
cd gcf_hello_world
# Write the function source. The original transcript opened "nano index.js"
# and then pasted raw JavaScript into the shell; a quoted heredoc writes the
# identical file non-interactively and keeps the JS out of the shell parser.
cat > index.js <<'EOF'
/**
* Cloud Function.
*
* @param {object} event The Cloud Functions event.
* @param {function} callback The callback function.
*/
exports.helloWorld = function helloWorld (event, callback) {
console.log(`My Cloud Function: ${JSON.stringify(event.data.message)}`);
callback();
};
EOF

# Create a Cloud Storage bucket
# NOTE(review): replace the bracketed placeholders with real values before running.
gsutil mb -p [PROJECT_ID] gs://[BUCKET_NAME]

# Deploy your function (triggered by messages on topic "hello_world")
gcloud functions deploy helloWorld \
  --stage-bucket [BUCKET_NAME] \
  --trigger-topic hello_world \
  --runtime nodejs6
gcloud functions describe helloWorld

# Test the function by invoking it with a sample payload
gcloud functions call helloWorld --data '{"message":"Hello World!"}'

# View logs
gcloud functions logs read helloWorld
# Add Apache2 HTTP Server to your instance
sudo apt-get update
# -y keeps the install non-interactive so a scripted run does not hang on a prompt
sudo apt-get install -y apache2 php7.0
sudo service apache2 restart

## Install the Cloud Monitoring agent
curl -sSO https://dl.google.com/cloudagents/install-monitoring-agent.sh
sudo bash install-monitoring-agent.sh

## Install the Cloud Logging agent
curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh
sudo bash install-logging-agent.sh
# Before you begin, you need an app to scan
# Clone the official samples repo and use the App Engine standard hello_world app.
git clone https://github.com/GoogleCloudPlatform/python-docs-samples
cd python-docs-samples/appengine/standard_python37/hello_world
# Test app
# dev_appserver.py serves the app locally (Cloud SDK App Engine component).
dev_appserver.py app.yaml
# Deploy App
# Deploys to the project currently configured in gcloud.
gcloud app deploy
# View App
# Prints/opens the deployed app's URL.
gcloud app browse
# Continuous Delivery with Jenkins in Kubernetes Engine
# Clone the repository
gcloud config set compute/zone us-east1-d
git clone https://github.com/GoogleCloudPlatform/continuous-deployment-on-kubernetes.git
cd continuous-deployment-on-kubernetes

# Provision a GKE cluster for Jenkins (scopes allow Cloud Source Repos access)
gcloud container clusters create jenkins-cd \
  --num-nodes 2 \
  --machine-type n1-standard-2 \
  --scopes "https://www.googleapis.com/auth/source.read_write,cloud-platform"
gcloud container clusters list
gcloud container clusters get-credentials jenkins-cd
kubectl cluster-info

# Install Helm (v2, Tiller-based — matches the lab's pinned chart version)
wget https://storage.googleapis.com/kubernetes-helm/helm-v2.14.1-linux-amd64.tar.gz
tar zxfv helm-v2.14.1-linux-amd64.tar.gz
cp linux-amd64/helm .
# Quote the account substitution so an address with unusual characters survives intact
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user="$(gcloud config get-value account)"
kubectl create serviceaccount tiller --namespace kube-system
kubectl create clusterrolebinding tiller-admin-binding --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
./helm init --service-account=tiller
./helm update
./helm version

# Configure and install Jenkins from the pinned chart
./helm install -n cd stable/jenkins -f jenkins/values.yaml --version 1.2.2 --wait
kubectl get pods
kubectl create clusterrolebinding jenkins-deploy --clusterrole=cluster-admin --serviceaccount=default:cd-jenkins
POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/component=jenkins-master" -l "app.kubernetes.io/instance=cd" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME
kubectl port-forward "$POD_NAME" 8080:8080 >> /dev/null &
kubectl get svc

# Connect to Jenkins: print the admin password.
# printf '%s\n' avoids handing the (arbitrary) secret to printf as its format string.
printf '%s\n' "$(kubectl get secret cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode)"
# Deploying the Application
cd sample-app
kubectl create ns production
kubectl apply -f k8s/production -n production
kubectl apply -f k8s/canary -n production
kubectl apply -f k8s/services -n production
# Scale the production frontend to 4 replicas
kubectl scale deployment gceme-frontend-production -n production --replicas 4
kubectl get pods -n production -l app=gceme -l role=frontend
kubectl get pods -n production -l app=gceme -l role=backend
kubectl get service gceme-frontend -n production
# Split assignment from export so a failed lookup is not masked (SC2155)
FRONTEND_SERVICE_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)
export FRONTEND_SERVICE_IP
curl "http://$FRONTEND_SERVICE_IP/version"
# Creating the Jenkins Pipeline
gcloud source repos create default
git init
git config credential.helper gcloud.sh
git remote add origin "https://source.developers.google.com/p/$DEVSHELL_PROJECT_ID/r/default"
# NOTE(review): replace the bracketed placeholders with your own identity.
git config --global user.email "[EMAIL_ADDRESS]"
git config --global user.name "[USERNAME]"
git add .
git commit -m "Initial commit"
git push origin master

# Creating the Development Environment
git checkout -b new-feature
# Edit the Jenkinsfile. The four lines below are the Groovy values to set
# inside it — they are Jenkinsfile content, not shell commands, so they stay
# commented out here:
vi Jenkinsfile
#   def project = 'REPLACE_WITH_YOUR_PROJECT_ID'
#   def appName = 'gceme'
#   def feSvcName = "${appName}-frontend"
#   def imageTag = "gcr.io/${project}/${appName}:${env.BRANCH_NAME}.${env.BUILD_NUMBER}"
vi html.go
vi main.go

# Kick off Deployment
git add Jenkinsfile html.go main.go
git commit -m "Version 2.0.0"
git push origin new-feature
kubectl proxy &
curl \
  http://localhost:8001/api/v1/namespaces/new-feature/services/gceme-frontend:80/proxy/version

git checkout -b canary
git push origin canary
FRONTEND_SERVICE_IP=$(kubectl get -o \
  jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)
export FRONTEND_SERVICE_IP
# Watch the served version while the canary rolls out (Ctrl+C to stop)
while true; do curl "http://$FRONTEND_SERVICE_IP/version"; sleep 1; done

git checkout master
git merge canary
git push origin master
FRONTEND_SERVICE_IP=$(kubectl get -o \
  jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)
export FRONTEND_SERVICE_IP
while true; do curl "http://$FRONTEND_SERVICE_IP/version"; sleep 1; done
kubectl get service gceme-frontend -n production
# Create a virtual environment
sudo apt-get update
# -y keeps the install non-interactive so a scripted run does not hang on a prompt
sudo apt-get install -y virtualenv
virtualenv -p python3 venv
source venv/bin/activate

# Clone the Deployment Manager sample templates
mkdir ~/dmsamples
cd ~/dmsamples
git clone https://github.com/GoogleCloudPlatform/deploymentmanager-samples.git

# Customize the deployment, then run the application
gcloud deployment-manager deployments create advanced-configuration --config nodejs.yaml

# Verify that the application is operational:
## find the global load balancer forwarding-rule IP address
gcloud compute forwarding-rules list
# Kubernetes in Google Cloud: Challenge Lab
# Task 1: Create a Docker image and store the Dockerfile
gsutil cat gs://cloud-training/gsp318/marking/setup_marking.sh | bash
gcloud source repos clone valkyrie-app
cd valkyrie-app
cat > Dockerfile <<EOF
FROM golang:1.10
WORKDIR /go/src/app
COPY source .
RUN go install -v
ENTRYPOINT ["app","-single=true","-port=8080"]
EOF
docker build -t valkyrie-app:v0.0.1 .

# Task 2: Test the created Docker image (run in the background, then mark)
docker run -p 8080:8080 valkyrie-app:v0.0.1 &
# NOTE(review): step2.sh is the marking script dropped by setup_marking.sh —
# confirm its location (e.g. ~/marking/step2.sh) if it is not on PATH.
step2.sh

# Task 3: Push the Docker image to Container Registry
docker tag valkyrie-app:v0.0.1 "gcr.io/$GOOGLE_CLOUD_PROJECT/valkyrie-app:v0.0.1"
docker push "gcr.io/$GOOGLE_CLOUD_PROJECT/valkyrie-app:v0.0.1"

# Task 4: Create and expose a deployment in Kubernetes
# Quote the sed expression so the substituted image path is one argument
sed -i "s#IMAGE_HERE#gcr.io/$GOOGLE_CLOUD_PROJECT/valkyrie-app:v0.0.1#g" k8s/deployment.yaml
gcloud container clusters get-credentials valkyrie-dev --zone us-east1-d
kubectl create -f k8s/deployment.yaml
kubectl create -f k8s/service.yaml
# Task 5: Update the deployment with a new version of valkyrie-app
git merge origin/kurt-dev
kubectl edit deployment valkyrie-dev
docker build -t "gcr.io/$GOOGLE_CLOUD_PROJECT/valkyrie-app:v0.0.2" .
docker push "gcr.io/$GOOGLE_CLOUD_PROJECT/valkyrie-app:v0.0.2"
# Interactive edit: point the deployment's image at the v0.0.2 tag
kubectl edit deployment valkyrie-dev

# Task 6: Create a pipeline in Jenkins to deploy your app
docker ps
# Stop the container started in Task 2. The original transcript used the
# literal placeholder "container_id", which can never match a real container;
# select it by image instead.
docker kill "$(docker ps -q --filter ancestor=valkyrie-app:v0.0.1)"
POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/component=jenkins-master" -l "app.kubernetes.io/instance=cd" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME
kubectl port-forward "$POD_NAME" 8080:8080 >> /dev/null &
# printf '%s\n' avoids handing the (arbitrary) secret to printf as its format string
printf '%s\n' "$(kubectl get secret cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode)"
gcloud source repos list
sed -i "s/green/orange/g" source/html.go
sed -i "s/YOUR_PROJECT/$GOOGLE_CLOUD_PROJECT/g" Jenkinsfile
git config --global user.email "you@example.com"
git config --global user.name "student"
git add .
git commit -m "build pipeline init"
# Create custom mode VPC networks with firewall rules
# Derive the project ID and number instead of hard-coding a lab-specific
# project (the transcript had qwiklabs-gcp-02-880c3276d62a and a literal
# compute service-account baked in, which only work in that one lab instance).
PROJECT_ID=$(gcloud config list --format 'value(core.project)' 2>/dev/null)
PROJECT_NUMBER=$(gcloud projects describe "$PROJECT_ID" --format='value(projectNumber)')
gcloud compute --project="$PROJECT_ID" networks create managementnet --subnet-mode=custom
gcloud compute --project="$PROJECT_ID" networks subnets create managementsubnet-us --network=managementnet --region=us-central1 --range=10.130.0.0/20
gcloud compute networks create privatenet --subnet-mode=custom
gcloud compute networks subnets create privatesubnet-us --network=privatenet --region=us-central1 --range=172.16.0.0/24
gcloud compute networks subnets create privatesubnet-eu --network=privatenet --region=europe-west1 --range=172.20.0.0/20
gcloud compute networks list
gcloud compute networks subnets list --sort-by=NETWORK
gcloud compute --project="$PROJECT_ID" firewall-rules create managementnet-allow-icmp-ssh-rdp --direction=INGRESS --priority=1000 --network=managementnet --action=ALLOW --rules=tcp:22,tcp:3389,icmp --source-ranges=0.0.0.0/0
gcloud compute firewall-rules create privatenet-allow-icmp-ssh-rdp --direction=INGRESS --priority=1000 --network=privatenet --action=ALLOW --rules=icmp,tcp:22,tcp:3389 --source-ranges=0.0.0.0/0

# Create VM instances (service account is the project's default compute SA)
gcloud beta compute --project="$PROJECT_ID" instances create managementnet-us-vm --zone=us-central1-c --machine-type=f1-micro --subnet=managementsubnet-us --network-tier=PREMIUM --maintenance-policy=MIGRATE --service-account="${PROJECT_NUMBER}-compute@developer.gserviceaccount.com" --scopes=https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append --image=debian-9-stretch-v20200420 --image-project=debian-cloud --boot-disk-size=10GB --boot-disk-type=pd-standard --boot-disk-device-name=managementnet-us-vm --reservation-affinity=any
gcloud compute instances create privatenet-us-vm --zone=us-central1-c --machine-type=n1-standard-1 --subnet=privatesubnet-us
# Create a VM instance with multiple network interfaces
# Optimizing cost with Google Cloud Storage
# Enable APIs and clone repository
gcloud services enable cloudscheduler.googleapis.com
git clone https://github.com/GoogleCloudPlatform/gcf-automated-resource-cleanup.git && cd gcf-automated-resource-cleanup/
# Split assignment from export so a failed substitution is not masked (SC2155);
# the transcript exported PROJECT_ID twice — once is enough.
PROJECT_ID=$(gcloud config list --format 'value(core.project)' 2>/dev/null)
export PROJECT_ID
WORKDIR=$(pwd)

# Create Cloud Storage buckets and add a publicly readable file
cd "$WORKDIR/migrate-storage"
gsutil mb -c regional -l us-central1 "gs://${PROJECT_ID}-serving-bucket"
gsutil acl ch -u allUsers:R "gs://${PROJECT_ID}-serving-bucket"
gsutil cp "$WORKDIR/migrate-storage/testfile.txt" "gs://${PROJECT_ID}-serving-bucket"
gsutil acl ch -u allUsers:R "gs://${PROJECT_ID}-serving-bucket/testfile.txt"
curl "http://storage.googleapis.com/${PROJECT_ID}-serving-bucket/testfile.txt"
# Second fetch as in the original transcript — presumably to register another
# access against the bucket's metrics; harmless either way.
curl "http://storage.googleapis.com/${PROJECT_ID}-serving-bucket/testfile.txt"

# Create a Monitoring dashboard
# Generate load on the serving bucket (10,000 requests via ApacheBench)
ab -n 10000 "http://storage.googleapis.com/$PROJECT_ID-serving-bucket/testfile.txt"

# Review and deploy the Cloud Function (grep reads the file directly; no cat needed)
grep "migrate_storage(" -A 15 "$WORKDIR/migrate-storage/main.py"
gcloud functions deploy migrate_storage --trigger-http --runtime=python37
FUNCTION_URL=$(gcloud functions describe migrate_storage --format=json | jq -r '.httpsTrigger.url')
export FUNCTION_URL
# POST a synthetic monitoring incident (with env vars substituted) to the function
envsubst < "$WORKDIR/migrate-storage/incident.json" | curl -X POST -H "Content-Type: application/json" "$FUNCTION_URL" -d @-

# Test and validate alerting automation
export IDLE_BUCKET_NAME="$PROJECT_ID-idle-bucket"
# Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment