Example Cloud Armor policies protecting Google HTTPS Global Load Balancer in front of GCE instance group and GKE cluster
#!/usr/bin/env bash
# REF: https://cloud.google.com/armor/docs/integrating-cloud-armor#with_ingress
# REF: https://cloud.google.com/armor/docs/configure-security-policies
# REF: https://cloud.google.com/iap/docs/load-balancer-howto
# REF: https://cloud.google.com/sdk/gcloud/reference/compute/url-maps/add-path-matcher
# REF: https://cloud.google.com/load-balancing/docs/https/setting-up-url-rewrite
export PROJECT_ID=$(gcloud config get-value project)
export PROJECT_USER=$(gcloud config get-value core/account) # set current user
export PROJECT_NUMBER=$(gcloud projects describe $PROJECT_ID --format="value(projectNumber)")
export IDNS=${PROJECT_ID}.svc.id.goog # workload identity domain
export GCP_REGION="us-west4" # CHANGEME (OPT)
export GCP_ZONE="us-west4-a" # CHANGEME (OPT)
export DOMAIN="msparr.com" # CHANGEME (OPT)
export TEST_NS="armor" # CHANGEME (OPT) - also used as the subdomain for TLS
export NETWORK_NAME="default"
# enable apis
gcloud services enable compute.googleapis.com \
  container.googleapis.com
# configure gcloud sdk
gcloud config set compute/region $GCP_REGION
gcloud config set compute/zone $GCP_ZONE
#####################################################
# Domain / Static IP
#####################################################
# reserve static IP
gcloud compute addresses create $TEST_NS-static \
  --ip-version=IPV4 \
  --global
# assign static IP to DNS
export STATIC_IP=$(gcloud compute addresses describe $TEST_NS-static --global --format="value(address)")
# confirm assigned IP to DNS to continue
while true; do
  read -p "Did you create DNS record for $DOMAIN with $STATIC_IP? " -n 1 -r yn
  echo
  case $yn in
    [Yy]* ) break;;
    [Nn]* ) exit;;
    * ) echo "Please answer yes or no.";;
  esac
done
#####################################################
# Security Policy
#####################################################
# create security policy
export POLICY_NAME="web-traffic-policy-us"
gcloud compute security-policies create $POLICY_NAME \
  --description "Rate limit US traffic"
# throttle requests in US if more than 10 per minute with 403 response
gcloud beta compute security-policies rules create 1000 \
  --security-policy $POLICY_NAME \
  --expression "origin.region_code == 'US'" \
  --action rate-based-ban \
  --rate-limit-threshold-count 10 \
  --rate-limit-threshold-interval-sec 60 \
  --ban-duration-sec 300 \
  --ban-threshold-count 1000 \
  --ban-threshold-interval-sec 600 \
  --conform-action allow \
  --exceed-action deny-403 \
  --enforce-on-key ALL
#####################################################
# GKE Cluster
#####################################################
# create GKE cluster
export CLUSTER_NAME="west4"
gcloud beta container --project $PROJECT_ID clusters create $CLUSTER_NAME \
  --zone $GCP_ZONE \
  --release-channel "regular" \
  --num-nodes "1" \
  --enable-ip-alias \
  --tags=allow-health-check
# deploy sample app to cluster
kubectl create ns $TEST_NS
kubectl create deployment kuard \
  --image=gcr.io/kuar-demo/kuard-amd64:blue \
  -n $TEST_NS
# create backend config linked to security policy
cat <<EOF | kubectl apply -f -
apiVersion: cloud.google.com/v1
kind: BackendConfig
metadata:
  name: my-backendconfig
  namespace: $TEST_NS
spec:
  securityPolicy:
    name: $POLICY_NAME
EOF
# expose app with NEG service
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: kuard-svc
  namespace: $TEST_NS
  annotations:
    cloud.google.com/neg: '{"exposed_ports": {"80":{}}}'
    cloud.google.com/backend-config: '{"default": "my-backendconfig"}'
  labels:
    app: kuard
spec:
  type: ClusterIP
  selector:
    app: kuard
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
EOF
#####################################################
# GCE VMs
#####################################################
# create MIG template for Apache web servers
export TEMPLATE_NAME="apache-template"
export GROUP_NAME="apache-web"
gcloud compute instance-templates create $TEMPLATE_NAME \
  --region=$GCP_REGION \
  --network=default \
  --subnet=default \
  --tags=allow-health-check \
  --image-family=debian-9 \
  --image-project=debian-cloud \
  --metadata=startup-script='#! /bin/bash
    apt-get update
    apt-get install apache2 -y
    a2ensite default-ssl
    a2enmod ssl
    vm_hostname="$(curl -H "Metadata-Flavor:Google" \
    http://169.254.169.254/computeMetadata/v1/instance/name)"
    echo "Page served from: $vm_hostname" | \
    tee /var/www/html/index.html
    systemctl restart apache2'
gcloud compute instance-groups managed create $GROUP_NAME \
  --template=$TEMPLATE_NAME --size=2 --zone=$GCP_ZONE
gcloud compute instance-groups set-named-ports $GROUP_NAME \
  --named-ports http:80 \
  --zone $GCP_ZONE
#####################################################
# Load Balancing
#####################################################
# create health check
gcloud compute health-checks create http health-check-$TEST_NS \
  --use-serving-port \
  --request-path="/"
# create firewall rules for health checks
gcloud compute firewall-rules create fw-allow-health-checks \
  --network=$NETWORK_NAME \
  --action=ALLOW \
  --direction=INGRESS \
  --source-ranges=35.191.0.0/16,130.211.0.0/22 \
  --target-tags=allow-health-check \
  --rules=tcp
# create backend service (cluster)
gcloud compute backend-services create backend-service-$CLUSTER_NAME \
  --global \
  --enable-logging \
  --logging-sample-rate=1.0 \
  --health-checks health-check-$TEST_NS
# create backend service (VMs)
gcloud compute backend-services create backend-service-$GROUP_NAME \
  --global \
  --enable-logging \
  --logging-sample-rate=1.0 \
  --protocol=HTTP \
  --port-name=http \
  --health-checks health-check-$TEST_NS
# add security policy to cluster backend
gcloud compute backend-services update backend-service-$CLUSTER_NAME \
  --global \
  --security-policy $POLICY_NAME
# add security policy to MIG backend
gcloud compute backend-services update backend-service-$GROUP_NAME \
  --global \
  --security-policy $POLICY_NAME
# verify security policy applied to backend
gcloud compute backend-services describe backend-service-$CLUSTER_NAME --format="value(securityPolicy)" \
  --global
gcloud compute backend-services describe backend-service-$GROUP_NAME --format="value(securityPolicy)" \
  --global
# add NEG to backend service for each zone (assumes jq installed)
export NEG_NAME=$(kubectl get svc kuard-svc -n $TEST_NS -o jsonpath="{.metadata.annotations.cloud\.google\.com/neg-status}" | jq '.network_endpoint_groups | {name: .["80"]}' | jq .name -r)
kubectl get svc kuard-svc -n $TEST_NS -o jsonpath="{.metadata.annotations.cloud\.google\.com/neg-status}" \
  | jq '.zones | {name: .[]}' | jq .name \
  | xargs -I {} gcloud compute backend-services add-backend backend-service-$CLUSTER_NAME \
    --global \
    --network-endpoint-group $NEG_NAME \
    --network-endpoint-group-zone={} \
    --balancing-mode=RATE \
    --max-rate-per-endpoint=100
# add MIG to backend service
gcloud compute backend-services add-backend backend-service-$GROUP_NAME \
  --instance-group=$GROUP_NAME \
  --instance-group-zone=$GCP_ZONE \
  --global
# create URL map
gcloud compute url-maps create ${TEST_NS}-url-map \
  --global \
  --default-service backend-service-${CLUSTER_NAME}
# create path matcher (route requests to different backends based on path)
gcloud compute url-maps add-path-matcher $TEST_NS-url-map \
  --path-matcher-name=web-matcher \
  --default-service=backend-service-${CLUSTER_NAME} \
  --backend-service-path-rules="/web/*=backend-service-$GROUP_NAME"
# add rewrite for web server so /web/ translates into /
export URL_MAP_CONFIG="https-lb.yaml"
cat > $URL_MAP_CONFIG <<EOF
defaultService: https://www.googleapis.com/compute/v1/projects/$PROJECT_ID/global/backendServices/backend-service-$CLUSTER_NAME
hostRules:
- hosts:
  - '*'
  pathMatcher: web-matcher
name: $TEST_NS-url-map
pathMatchers:
- defaultService: https://www.googleapis.com/compute/v1/projects/$PROJECT_ID/global/backendServices/backend-service-$CLUSTER_NAME
  name: web-matcher
  pathRules:
  - paths:
    - /web/*
    routeAction:
      urlRewrite:
        pathPrefixRewrite: /
    service: https://www.googleapis.com/compute/v1/projects/$PROJECT_ID/global/backendServices/backend-service-$GROUP_NAME
EOF
# validate url map
gcloud compute url-maps validate --source $URL_MAP_CONFIG
# update url map
gcloud compute url-maps import $TEST_NS-url-map \
  --source $URL_MAP_CONFIG \
  --global
# create managed SSL cert
gcloud beta compute ssl-certificates create $TEST_NS-cert \
  --domains "$TEST_NS.$DOMAIN"
# create target HTTPS proxy
gcloud compute target-https-proxies create $TEST_NS-https-proxy \
  --ssl-certificates=$TEST_NS-cert \
  --url-map=$TEST_NS-url-map
# create global forwarding rule on the reserved static IP (port 443)
gcloud compute forwarding-rules create $TEST_NS-fw-rule \
  --target-https-proxy=$TEST_NS-https-proxy \
  --global \
  --ports=443 \
  --address=$TEST_NS-static
# verify cert (may take 10-20 minutes)
gcloud beta compute ssl-certificates describe $TEST_NS-cert
# verify security policy was enforced
export LOG_FILTER="resource.type=\"http_load_balancer\" "\
"jsonPayload.enforcedSecurityPolicy.name=\"${POLICY_NAME}\" "\
"jsonPayload.enforcedSecurityPolicy.outcome=\"THROTTLE\""
gcloud logging read --project $PROJECT_ID "$LOG_FILTER"
Optional: enable logging on backends
You can enable logging with a desired sampling rate on each backend service (via the console as well).
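The script above already passes --enable-logging and --logging-sample-rate=1.0 when creating the backend services. A minimal sketch for turning logging on (or changing the sample rate) for an existing backend service, reusing the backend-service name from the script, might look like this:
# enable logging on an existing backend service and sample half the requests
gcloud compute backend-services update backend-service-$CLUSTER_NAME \
  --global \
  --enable-logging \
  --logging-sample-rate=0.5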
Verifying Cloud Armor policy enforcement
As described in https://cloud.google.com/armor/docs/troubleshooting, you can filter logs in Cloud Logging to view the enforcedSecurityPolicy events, as shown below and in the code snippet.
# verify security policy was enforced
export LOG_FILTER="resource.type=\"http_load_balancer\" "\
"jsonPayload.enforcedSecurityPolicy.name=\"${POLICY_NAME}\" "\
"jsonPayload.enforcedSecurityPolicy.outcome=\"THROTTLE\""
gcloud logging read --project $PROJECT_ID "$LOG_FILTER"
Web console verification
Considerations
- set up a private GKE cluster and Cloud NAT for egress traffic (a rough sketch follows this list)
- disable external IPs on compute instances (ideally with an org policy beforehand)
- disable default networks and create your own network/subnets (ideally with a Shared VPC host project and service projects)
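A rough sketch of the first two ideas, assuming you have permission to set org policies on the project and reusing the network variables from the script; the router and NAT names ($TEST_NS-router, $TEST_NS-nat) and the policy file name are made up for illustration:
# hypothetical hardening sketch (not part of the script above)
# deny external IPs on VM instances in this project via org policy
cat > no-external-ip-policy.yaml <<EOF
constraint: constraints/compute.vmExternalIpAccess
listPolicy:
  allValues: DENY
EOF
gcloud resource-manager org-policies set-policy no-external-ip-policy.yaml \
  --project $PROJECT_ID
# give private instances an egress path with Cloud Router + Cloud NAT
gcloud compute routers create $TEST_NS-router \
  --network=$NETWORK_NAME \
  --region=$GCP_REGION
gcloud compute routers nats create $TEST_NS-nat \
  --router=$TEST_NS-router \
  --region=$GCP_REGION \
  --auto-allocate-nat-external-ips \
  --nat-all-subnet-ip-ranges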
Global Load Balancing With Cloud Armor Example
The example above configures a security policy and applies it both to a GKE cluster service (using BackendConfig) and to a GCE managed instance group of VMs (attached as a backend). The goal is to illustrate how you can leverage a global external HTTPS load balancer on Google Cloud and let it route traffic to different backends on VMs and Kubernetes.
Load balancer config (in console)
Browser tests
Success
First, the paths returned the expected apps on both GKE and GCE.
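The same checks can be run from a terminal. A quick sketch, assuming the DNS record and managed certificate from the script are active (the default path is served by kuard on GKE; /web/ is rewritten to / and served by Apache on the MIG):
# default path -> GKE backend (kuard)
curl -s https://$TEST_NS.$DOMAIN/ | head -n 5
# /web/ path -> GCE managed instance group (Apache)
curl -s https://$TEST_NS.$DOMAIN/web/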
Failure (expected)
After refreshing the page more than 10 times within 1 minute, the Cloud Armor rate-limiting policy was triggered for both backends.
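To reproduce the throttling without a browser, a small loop like this sketch (the 10-requests-per-minute threshold comes from the rate-based-ban rule in the script) should start returning 403 once the limit is exceeded:
# send 20 quick requests and print only the HTTP status codes;
# after roughly 10 requests within a minute the policy should return 403
for i in $(seq 1 20); do
  curl -s -o /dev/null -w "%{http_code}\n" https://$TEST_NS.$DOMAIN/
done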