Skip to content

Instantly share code, notes, and snippets.

Forked from pydevops/gcloud_cheat_sheet.md
Created September 9, 2019 03:22
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Star You must be signed in to star a gist
Save xditx32/3093506f9b9001ec19d64fcbcec21b6d to your computer and use it in GitHub Desktop.
gcp gcloud cheat sheet


Other cheatsheets

multiple gcloud config configurations

# create a named configuration, list all, and make one active
gcloud config configurations create pythonrocks
gcloud config configurations list
gcloud config configurations activate pythonrocks
# NOTE(review): the account email argument was lost in transcription — this needs a value
gcloud config set core/account
gcloud auth login
gcloud projects list
gcloud config set project dev-193420

switch gcloud context with gcloud config

# show the active configuration, then set account/project/region/zone
gcloud config list
# NOTE(review): account email lost in transcription — expects an argument
gcloud config set account 
gcloud config set project salt-163215
gcloud config set compute/region us-west1
gcloud config set compute/zone us-west1-a
# one-shot alias to restore this whole context (account arg also stripped here)
alias demo='gcloud config set account && gcloud config set project salt-163215 && gcloud config set compute/region us-west1 && gcloud config set compute/zone us-west1-a'

# capture current config values; stderr silenced so unset keys yield ""
cluster=$(gcloud config get-value container/cluster 2> /dev/null)
zone=$(gcloud config get-value compute/zone 2> /dev/null)
project=$(gcloud config get-value core/project 2> /dev/null)

# switch project based on the name; quote the command substitution so an
# empty/odd result is passed as one argument instead of being word-split
gcloud config set project "$(gcloud projects list --filter='name:wordpress-dev' --format='value(project_id)')"

# Preflight: abort unless gcloud is installed and a default region is set.
command -v gcloud >/dev/null 2>&1 || { \
 echo >&2 "I require gcloud but it's not installed.  Aborting."; exit 1; }

REGION=$(gcloud config get-value compute/region)
if [[ -z "${REGION}" ]]; then
    echo "" 1>&2
    echo "gcloud cli must be configured with a default region." 1>&2
    echo "run 'gcloud config set compute/region REGION'." 1>&2
    echo "replace 'REGION' with the region name like us-west1." 1>&2
    exit 1
fi


# list credentialed accounts, log in interactively, or use a service-account key
gcloud auth list
gcloud auth login
gcloud auth activate-service-account --key-file=sa_key.json

kubectl uses OAuth token generated by

  • gcloud config config-helper --format json
  • gcloud config config-helper --format='value(credential.access_token)'
  • gcloud auth print-access-token generates new token


# dump environment/config details; capture the active project into $PROJECT
gcloud info --format flattened
export PROJECT=$(gcloud info --format='value(config.project)')


# various ways to get project_id (all three are equivalent)
PROJECT_ID=$(gcloud config get-value core/project)
PROJECT_ID=$(gcloud config list project --format='value(core.project)')
PROJECT_ID=$(gcloud info --format='value(config.project)')

# get project_number given project_id or name
gcloud projects list --filter="project_id:${project_id}"  --format='value(project_number)'
gcloud projects list --filter="name:${project_name}"  --format='value(project_number)'


To return a list of zones given a region

# zones belonging to a region
gcloud compute zones list --filter=region:us-central1


# billing accounts and organizations visible to the caller
gcloud beta billing accounts list
gcloud organizations list

IAM list permission and roles for a given resource

# list permissions/roles testable or grantable on a resource; both commands
# take the resource's FULL URI (the scheme prefixes were lost in transcription)
gcloud iam list-testable-permissions <uri>
# e.g.
gcloud iam list-testable-permissions //$PROJECT_ID

gcloud iam list-grantable-roles <uri>
gcloud iam list-grantable-roles //$PROJECT_ID
gcloud iam list-grantable-roles \
  //$PROJECT_ID/zones/us-central1-a/instances/iowa1

# get uri e.g.
gcloud projects list --uri

IAM service account

# look up the jenkins service-account email and active project
export SA_EMAIL=$(gcloud iam service-accounts list \
    --filter="displayName:jenkins" --format='value(email)')
export PROJECT=$(gcloud info --format='value(config.project)')

# create and list service accounts
gcloud iam service-accounts create jenkins --display-name jenkins
gcloud iam service-accounts list
# '~' is a regex match: find the default compute service accounts
gcloud iam service-accounts list   --filter='email ~ [0-9]*-compute@.*'   --format='table(email)'

# create & list sa key  
gcloud iam service-accounts keys create jenkins-sa.json --iam-account $SA_EMAIL    
gcloud iam service-accounts keys list --iam-account=vault-admin@<project_id>

# project level: grant roles to sa
gcloud projects get-iam-policy $PROJECT
gcloud projects add-iam-policy-binding $PROJECT  --role roles/storage.admin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.instanceAdmin.v1 \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.networkAdmin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/compute.securityAdmin \
    --member serviceAccount:$SA_EMAIL
gcloud projects add-iam-policy-binding $PROJECT --role roles/iam.serviceAccountActor \
    --member serviceAccount:$SA_EMAIL
# service account level: add role to service account
gcloud iam service-accounts get-iam-policy <sa_email>
# NOTE(review): --member='' lost its value in transcription (expects e.g. user:alice@example.com)
gcloud iam service-accounts add-iam-policy-binding --member='' --role='roles/iam.serviceAccountActor'
# impersonate as a svc account terraform@${PROJECT_ID}
# NOTE(review): SA emails below look truncated — full form is terraform@${PROJECT_ID}.iam.gserviceaccount.com
gcloud iam service-accounts add-iam-policy-binding  terraform@${PROJECT_ID} --role roles/iam.serviceAccountTokenCreator
gcloud container clusters list --impersonate-service-account=terraform@${PROJECT_ID}

GCS bucket level

# grant the default Compute Engine service account read access on a bucket
COMPUTE_ENGINE_SA_EMAIL=$(gcloud iam service-accounts list --filter="name:Compute Engine default service account" --format "value(email)")
gsutil iam ch serviceAccount:${COMPUTE_ENGINE_SA_EMAIL}:objectViewer gs://bucket-name

Custom Roles

# list predefined roles
gcloud iam roles list
# list custom roles
gcloud iam roles list --project $PROJECT_ID

# create a custom role either on project level (--project [PROJECT_ID])
# or org level (--organization [ORGANIZATION_ID]):
# (a) from a YAML definition file
gcloud iam roles create editor --project $PROJECT_ID --file role-definition.yaml
# (b) entirely from flags
gcloud iam roles create viewer --project $PROJECT_ID --title "Role Viewer" \
  --description "Custom role description." \
  --permissions compute.instances.get,compute.instances.list --stage ALPHA

app engine

cloud build

# user-defined substitutions (must start with an underscore)
gcloud builds submit --config=cloudbuild.yaml --substitutions=_BRANCH_NAME=foo,_BUILD_NUMBER=1 .

# override built-in TAG_NAME
gcloud builds submit --config=cloudbuild.yaml --substitutions=TAG_NAME=v1.0.1

Cloud build trigger GCE rolling replace/start

# cloudbuild.yaml: build the image, then rolling-restart the MIG.
# NOTE(review): builder names and registry prefix were stripped in
# transcription; restored as the conventional cloud builders.
- name: ''
  args: [ 'build', '-t', '$PROJECT_ID/gcp-cloudbuild-gce-angular', '.' ]
- name: ''
  args: [ 'beta', 'compute', 'instance-groups', 'managed', 'rolling-action', 'restart', 'gce-angular-instance-group', '--zone=us-east1-b' ]
images:
- '$PROJECT_ID/gcp-cloudbuild-gce-angular'


# list all keyrings 
gcloud kms keyrings list --location global
# list all keys in my_key_ring
gcloud kms keys list --keyring my_key_ring --location global

# grant KMS IAM roles to a user account $USER_EMAIL (member type user:)
gcloud kms keyrings add-iam-policy-binding $KEYRING_NAME \
    --location global \
    --member user:$USER_EMAIL \
    --role roles/cloudkms.admin
gcloud kms keyrings add-iam-policy-binding $KEYRING_NAME \
    --location global \
    --member user:$USER_EMAIL \
    --role roles/cloudkms.cryptoKeyEncrypterDecrypter
# Encrypt and Decrypt in REST API
curl -v "$DEVSHELL_PROJECT_ID/locations/global/keyRings/$KEYRING_NAME/cryptoKeys/$CRYPTOKEY_NAME:encrypt" \
  -d "{\"plaintext\":\"$PLAINTEXT\"}" \
  -H "Authorization:Bearer $(gcloud auth application-default print-access-token)"\
  -H "Content-Type:application/json" \
| jq .ciphertext -r > 1.encrypted

curl -v "$DEVSHELL_PROJECT_ID/locations/global/keyRings/$KEYRING_NAME/cryptoKeys/$CRYPTOKEY_NAME:decrypt" \
  -d "{\"ciphertext\":\"$(cat 1.encrypted)\"}" \
  -H "Authorization:Bearer $(gcloud auth application-default print-access-token)"\
  -H "Content-Type:application/json" \
| jq .plaintext -r | base64 -d    

compute engine

gcloud command for creating an instance?

from web console

# template form: bracketed placeholders must be replaced
gcloud compute instances create [INSTANCE_NAME] \
  --image-family [IMAGE_FAMILY] \
  --image-project [IMAGE_PROJECT] \
  --create-disk image=[DISK_IMAGE],image-project=[DISK_IMAGE_PROJECT],size=[SIZE_GB],type=[DISK_TYPE]
# concrete example (as produced by the web console's "equivalent command line")
# NOTE(review): --scopes=,,,,, lost its values in transcription — fill in real scopes
gcloud compute instances create micro1 --zone=us-west1-a --machine-type=f1-micro --subnet=default --network-tier=PREMIUM --maintenance-policy=MIGRATE --scopes=,,,,, --min-cpu-platform=Automatic --image=debian-9-stretch-v20180510 --image-project=debian-cloud --boot-disk-size=10GB --boot-disk-type=pd-standard --boot-disk-device-name=micro1

list compute images

# list image self-links whose name contains "debian"
gcloud compute images list --filter=name:debian --uri

# Use the following command to see available non-Shielded VM Windows Server images
gcloud compute images list --project windows-cloud --no-standard-images
# Use the following command to see a list of available Shielded VM images, including Windows images
gcloud compute images list --project gce-uefi-images --no-standard-images

list an instance

# filter examples: exact zone, name regex (~), substring (:), tag, machine type
gcloud compute instances list --filter="zone:us-central1-a"
gcloud compute instances list --project=dev --filter="name~^es"
gcloud compute instances list --project=dev --filter=name:kafka --format="value(name,INTERNAL_IP)"
gcloud compute instances list --filter=tags:kafka-node
gcloud compute instances list --filter='machineType:g1-small'

move instance

gcloud compute instances move <instance_wanna_move> --destination-zone=us-central1-a --zone=us-central1-c

ssh & scp

#--verbosity=debug is great for debugging, showing the SSH command 
# the following is a real word example for running a bastion server that talks to a GKE cluster (master authorized network)
gcloud compute ssh --verbosity=debug <instance_name> --command "kubectl get nodes"

# recursively copy a local directory to the instance's home directory
gcloud compute scp  --recurse ../manifest <instance_name>:


# find out access-config-name's name
gcloud compute instances describe oregon1
# remove the external IP
gcloud compute instances delete-access-config  oregon1 --access-config-name "External NAT"
# connect via IAP, assuming the IAP is granted to the account used for login. 
gcloud beta compute ssh oregon1 --tunnel-through-iap

ssh port forwarding for elasticsearch

gcloud compute --project "foo" ssh --zone "us-central1-c" "elasticsearch-1"  --ssh-flag="-L localhost:9200:localhost:9200"

The second localhost is relative to elasticsearch-1.

ssh reverse port forwarding

for example, how to connect to home server's flask server (tcp port 5000) for a demo or a local game server in development

# reverse forward (-R): expose the local machine's port 5000 on the remote bastion
GOOGLE_CLOUD_PROJECT=$(gcloud config get-value project)
gcloud compute --project "${GOOGLE_CLOUD_PROJECT}" ssh --zone "us-west1-c" --ssh-flag="-v -N -R :5000:localhost:5000" "google_cloud_bastion_server"

generate ssh config

gcloud compute config-ssh


gcloud debugging: append --log-http to any command (e.g. gcloud compute instances list --log-http) to trace the underlying HTTP requests; use the serial-port output for boot debugging.

instance level metadata

# read instance-level metadata from inside the VM.
# NOTE(review): the URLs were stripped in transcription; the metadata server
# form is shown — replace <key> with the attribute you need.
curl -s "http://metadata.google.internal/computeMetadata/v1/instance/attributes/<key>" -H "Metadata-Flavor: Google"
leader=$(curl -s "http://metadata.google.internal/computeMetadata/v1/instance/attributes/leader" -H "Metadata-Flavor: Google")

project level metadata

# project-level metadata; --flatten expands commonInstanceMetadata entries
gcloud compute project-info describe
gcloud compute project-info describe --flatten="commonInstanceMetadata[]"

instances, template, target-pool and instance group

# Write an nginx startup script, then build template -> target pool -> MIG.
# NOTE(review): the heredoc's target filename and closing EOF were lost in
# transcription; restored, and wired into --metadata-from-file.
cat << EOF > startup.sh
#! /bin/bash
apt-get update
apt-get install -y nginx
service nginx start
sed -i -- 's/nginx/Google Cloud Platform - '"\$HOSTNAME"'/' /var/www/html/index.nginx-debian.html
EOF

gcloud compute instance-templates create nginx-template \
         --metadata-from-file startup-script=startup.sh
gcloud compute target-pools create nginx-pool
gcloud compute instance-groups managed create nginx-group \
         --base-instance-name nginx \
         --size 2 \
         --template nginx-template \
         --target-pool nginx-pool

MIG with startup and shutdown scripts

# NAT gateway instance templates (HA NAT tutorial).
# NOTE(review): the copied filename and a line break were lost in
# transcription; the mid-line backslashes made the --address flags unreachable.
gsutil cp gs://nat-gw-template/startup.sh .

gcloud compute instance-templates create nat-1 \
    --machine-type n1-standard-2 --can-ip-forward --tags natgw \
    --metadata-from-file=startup-script=startup.sh --address $nat_1_ip

gcloud compute instance-templates create nat-2 \
    --machine-type n1-standard-2 --can-ip-forward --tags natgw \
    --metadata-from-file=startup-script=startup.sh --address $nat_2_ip

disk snapshot

gcloud compute disks snapshot kafka-data1-1 --async --snapshot-names=kafka-data-1 --project project_a --zone us-west1-a
Use [gcloud compute operations describe URI] command to check the status of the operation(s).

regional disk

 gcloud beta compute instance attach-disk micro1 --disk pd-west1 --disk-scope regional


network and subnets

 # custom-mode VPC with two regional subnets.
 # NOTE(review): --range values were stripped in transcription; example CIDRs
 # restored — substitute your own ranges.
 gcloud compute networks create privatenet --subnet-mode=custom
 gcloud compute networks subnets create privatesubnet-us --network=privatenet --region=us-central1 --range=
 gcloud compute networks subnets create privatesubnet-eu --network=privatenet --region=europe-west1 --range=
 gcloud compute networks subnets list --sort-by=NETWORK


tag the instances with no-ips

# tag instances, then route all their egress ( through the NAT
# gateway instance. NOTE(review): the destination range was stripped in
# transcription; (default route) restored.
gcloud compute instances add-tags existing-instance --tags no-ip
gcloud compute routes create no-ip-internet-route \
    --network custom-network1 \
    --destination-range \
    --next-hop-instance nat-gateway \
    --next-hop-instance-zone us-central1-a \
    --tags no-ip --priority 800

firewall rules

# allow SSH, RDP and ICMP for the given network.
# NOTE(review): source/destination ranges were stripped in transcription;
# restored as permissive examples — tighten for real use. "3389" alone is not
# a valid rule spec; it must be "tcp:3389".
gcloud compute firewall-rules create managementnet-allow-icmp-ssh-rdp \
    --direction=INGRESS --priority=1000 --network=managementnet \
    --action=ALLOW --rules=tcp:22,tcp:3389,icmp --source-ranges=

# allow all internal traffic from the given source range
gcloud compute firewall-rules create mynetwork-allow-internal --network \
mynetwork --action ALLOW --direction INGRESS --rules all \
--source-ranges
gcloud compute firewall-rules list --filter="network:mynetwork"

# deny egress ICMP; lower number = higher priority, so 500 beats the allow rules
gcloud compute firewall-rules create mynetwork-deny-icmp \
--network mynetwork --action DENY --direction EGRESS --rules icmp \
--destination-ranges --priority 500
gcloud compute firewall-rules list \
--filter="network:mynetwork AND name=mynetwork-deny-icmp"

# sort-by
gcloud compute firewall-rules list --sort-by=NETWORK

layer 4 network lb

# L4 network LB: open tcp:80, then point a regional forwarding rule at the pool
gcloud compute firewall-rules create www-firewall --allow tcp:80
gcloud compute forwarding-rules create nginx-lb \
         --region us-central1 \
         --ports=80 \
         --target-pool nginx-pool
gcloud compute firewall-rules list --sort-by=NETWORK

layer 7 http lb

# L7 HTTP LB: health check -> named port -> backend service -> url map
# -> proxy -> global forwarding rule.
gcloud compute http-health-checks create http-basic-check
gcloud compute instance-groups managed \
       set-named-ports nginx-group \
       --named-ports http:80

gcloud compute backend-services create nginx-backend \
      --protocol HTTP --http-health-checks http-basic-check --global
# NOTE(review): the original's trailing backslash swallowed the final flag;
# a global backend service requires --global on add-backend too.
gcloud compute backend-services add-backend nginx-backend \
    --instance-group nginx-group \
    --instance-group-zone us-central1-a \
    --global

gcloud compute url-maps create web-map \
    --default-service nginx-backend

gcloud compute target-http-proxies create http-lb-proxy \
    --url-map web-map
gcloud compute forwarding-rules create http-content-rule \
        --global \
        --target-http-proxy http-lb-proxy \
        --ports 80
gcloud compute forwarding-rules list


# find the forwarding rule behind a DNS name (filter by its resolved IP)
gcloud compute forwarding-rules list --filter=$(dig +short <dns_name>)
gcloud compute forwarding-rules describe my-forwardingrule --region us-central1
gcloud compute forwarding-rules describe my-http-forwardingrule --global


# get the external IP address of the instance
# NOTE(review): the original line ended in a stray backslash that fused it with
# the next command; a typical --format to extract the NAT IP is restored.
gcloud compute instances describe single-node \
    --format='value(networkInterfaces[0].accessConfigs[0].natIP)'
gcloud compute addresses describe https-lb --global --format json

# list all IP addresses across every project the caller can see
gcloud projects list --format='value(project_id)' | xargs -I {} gcloud compute addresses list --format='value(address)' --project {}  2>/dev/null | sort | uniq -c

GCP managed ssl certificate

# GCP-managed TLS certificate.
# NOTE(review): the --domains value was stripped in transcription; placeholder added.
gcloud beta compute ssl-certificates create example-mydomain --domains example.mydomain.com
gcloud beta compute ssl-certificates list
gcloud beta compute ssl-certificates describe example-mydomain
# It takes 30mins+ to provision the TLS; one of the conditions is that a
# target-https-proxy must be associated with the cert.
gcloud beta compute target-https-proxies list

StackDriver logging

gcloud logging read "timestamp >= \"2018-04-19T00:30:00Z\"  and logName=projects/${project_id}/logs/requests and resource.type=http_load_balancer" --format="csv(httpRequest.remoteIp,httpRequest.requestUrl,timestamp)" --project=${project_id}


list service available

gcloud services list --available

Enable Service

# NOTE(review): every service name below was lost in transcription; substitute
# real API names (compute.googleapis.com, container.googleapis.com, ...).

# chain: enable several services with one command
gcloud services enable compute.googleapis.com container.googleapis.com

# or not chain: one service per invocation
gcloud services enable compute.googleapis.com
gcloud services enable container.googleapis.com
# Enable a service only when it is not already enabled.
# Globals: SERVICE (read) — full API name, e.g. compute.googleapis.com
# NOTE(review): the original was truncated (no else/fi/}, truncated --format
# field); reconstructed to compare the listed name against $SERVICE.
function enable-service() {
  if [[ $(gcloud services list --format="value(config.name)" \
                                --filter="config.name:$SERVICE" 2>&1) != \
                                "$SERVICE" ]]; then
    echo "Enabling $SERVICE"
    gcloud services enable "$SERVICE"
  else
    echo "$SERVICE is already enabled"
  fi
}

Client libraries you can use to connect to Google APIs

chaining gcloud commands

# delete every forwarding rule in the region, non-interactively (-q)
gcloud compute forwarding-rules list --format 'value(NAME)' \
| xargs -I {}  gcloud compute forwarding-rules delete {}  --region us-west1 -q

# count duplicate addresses across all visible projects
gcloud projects list --format='value(project_id)' \
| xargs -I {} gcloud compute addresses list --format='value(address)' --project {}  2>/dev/null | sort | uniq -c

# -p prompts before each stop; second pipeline runs chef on each internal IP
gcloud compute instances list --filter=elasticsearch --format='value(NAME)' \
| xargs -I {} -p gcloud compute instances stop {}
gcloud compute instances list --filter=elasticsearch --format='value(INTERNAL_IP)' \
| xargs -I {} ssh {} "sudo chef-client"

# delete non default routes
gcloud compute routes list --filter="NOT network=default" --format='value(NAME)' \
| xargs -I {} gcloud compute routes delete -q {}

one liner to purge GCR images given a date

# purge untagged GCR digests older than $DATE.
# $IMAGE is the full image path, e.g.
# NOTE(review): the space between the flag/command and $IMAGE was lost in
# transcription; restored.
gcloud container images list-tags $IMAGE --limit=unlimited --sort-by=TIMESTAMP   \
--filter="NOT tags:* AND timestamp.datetime < '${DATE}'" --format='get(digest)' | \
while read digest;do gcloud container images delete -q --force-delete-tags $IMAGE@$digest ;done


# create a private cluster (auto-created subnetwork).
# NOTE(review): CIDR values were stripped in transcription; restored with the
# values from the GKE private-cluster documentation — adjust to your network.
gcloud beta container clusters create private-cluster \
    --private-cluster \
    --master-ipv4-cidr \
    --enable-ip-alias \
    --create-subnetwork ""

# subnet with secondary ranges for pods and services (VPC-native)
gcloud compute networks subnets create my-subnet \
    --network default \
    --range \
    --enable-private-ip-google-access \
    --region us-central1 \
    --secondary-range my-svc-range=,my-pod-range=

# private cluster on the pre-made subnet, reusing its secondary ranges.
# NOTE(review): the master CIDR was stripped in transcription; restored from
# the GKE docs. --master-authorized-networks takes CIDR notation, hence /32.
gcloud beta container clusters create private-cluster2 \
    --private-cluster \
    --enable-ip-alias \
    --master-ipv4-cidr \
    --subnetwork my-subnet \
    --services-secondary-range-name my-svc-range \
    --cluster-secondary-range-name my-pod-range
 gcloud container clusters update private-cluster2 \
    --enable-master-authorized-networks \
    --master-authorized-networks <external_ip_of_kubectl_instance>/32
# create a GKE cluster with CloudRun, Istio, HPA enabled
# NOTE(review): the original ended with a trailing backslash, which would have
# consumed the next line as an argument; removed.
gcloud beta container clusters create run-gke \
  --addons HorizontalPodAutoscaling,HttpLoadBalancing,Istio,CloudRun \
  --scopes cloud-platform \
  --zone us-central1-a \
  --machine-type n1-standard-4 \
  --enable-stackdriver-kubernetes
# create a VPC native cluster
# NOTE(review): the CIDR network bases were stripped in transcription
# (only "/16" and "/22" survived); example bases restored — substitute your own.
gcloud container clusters create k1 \
--network custom-ip-vpc --subnetwork subnet-alias \
--enable-ip-alias --cluster-ipv4-cidr=   --services-ipv4-cidr=
# get the GKE endpoint
gcloud container clusters describe mycluster --format='get(endpoint)'
# generate a ~/.kube/config for private cluster with private endpoint
gcloud container clusters get-credentials private-cluster --zone us-central1-a --internal-ip

Machine Learning

# entity analysis with the Natural Language API, pretty-printed via bat
brew install bat
gcloud ml language analyze-entities --content="Michelangelo Caravaggio, Italian painter, is known for 'The Calling of Saint Matthew'." | bat  -l json

Deployment Manager

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment