Skip to content

Instantly share code, notes, and snippets.

@adrienjoly
Last active August 31, 2020 12:49
Embed
What would you like to do?
---
# RabbitMQ broker Deployment: single replica, management plugin enabled.
# Migrated from apps/v1beta1 (removed in Kubernetes 1.16) to apps/v1.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rabbitmq
spec:
  replicas: 1
  # selector is mandatory in apps/v1 and must match the pod template labels
  selector:
    matchLabels:
      app: rabbitmq
  template:
    metadata:
      labels:
        app: rabbitmq
    spec:
      restartPolicy: Always
      # give RabbitMQ time to drain connections before SIGKILL
      terminationGracePeriodSeconds: 60
      containers:
        - name: rabbitmq
          # official image variant that bundles the management UI
          image: rabbitmq:3-management
          imagePullPolicy: Always
          ports:
            - containerPort: 5672   # AMQP
            - containerPort: 15672  # management UI
---
# Service exposing RabbitMQ inside the cluster (and on each node via NodePort).
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq
  # => exports env var RABBITMQ_SERVICE_HOST (IP address)
spec:
  type: NodePort
  ports:
    - port: 5672
      name: 'main'
      # => exports env var RABBITMQ_SERVICE_PORT_MAIN=5672
    - port: 15672
      name: 'ui'
      # => exports env var RABBITMQ_SERVICE_PORT_UI=15672
  selector:
    app: rabbitmq
# NOTE(review): flattened excerpt of a Deployment pod spec — the original
# nesting (container-level `env`/`volumeMounts`/`envFrom` vs pod-level
# `volumes`/`containers`) was lost when this was pasted. Restore indentation
# against the full manifest before applying; do not use as-is.
env:
- name: JS_PROXY_URL_PREFIX
# Kubernetes expands $(VAR) references from env vars injected by the
# rendertron Service — presumably a Service named "rendertron"; verify it exists.
value: "http://$(RENDERTRON_SERVICE_HOST):$(RENDERTRON_SERVICE_PORT)/render/"
volumes:
- name: volume-service-account
secret:
# 420 decimal == 0644 octal file permissions
defaultMode: 420
secretName: service-account-for-crawler-deployments
containers:
- name: crawler-web
# [...]
volumeMounts:
# mounts the service-account secret as read-only files under /crawler-secrets
- mountPath: /crawler-secrets
name: volume-service-account
readOnly: true
envFrom:
# optional: true => the pod still starts if the crawler-creds-slack Secret is absent
- secretRef:
name: crawler-creds-slack
optional: true
env:
# [...]
#!/usr/bin/env bash
# Stops the crawler deployments, runs the db-migrate job, then restarts them,
# restoring the previous number of worker replicas.
#
# Fail fast: if the migration job fails, the script exits with the crawler
# still scaled to 0, so it is never restarted against an unmigrated schema.
set -euo pipefail

echo "ℹ️ This script will stop the crawler, apply db migrations, and then restart the crawler."

# Docker image tag substituted into the db-migrate job manifest (overridable via env).
MANAGER_IMAGE="${MANAGER_IMAGE:-latest}"

# Remember how many workers were running so we can scale back to the same count.
WORKER_REPLICAS=$(kubectl get deployment crawler-worker -o=jsonpath='{$.spec.replicas}')
echo "(i) Found ${WORKER_REPLICAS} worker replicas"

echo "Shutting down crawler-manager, crawler-web and crawler-worker..."
kubectl scale deployment crawler-manager crawler-web crawler-worker --replicas=0

echo "Setting up and running the db-migrate job..."
# Render the job manifest with the chosen image tag, run it, then clean up.
sed "s,_DOCKER_IMAGE_,${MANAGER_IMAGE},g" ./on-demand/db-migrate.template.yml >./on-demand/db-migrate.yml
./k8s-run-job.sh db-migrate
rm ./on-demand/db-migrate.yml

echo "Restarting crawler-manager, crawler-web and crawler-worker..."
kubectl scale deployment crawler-manager crawler-web --replicas=1
kubectl scale deployment crawler-worker --replicas="${WORKER_REPLICAS}"
echo "✅ Done."
---
# Ingress exposing crawler-web through Google's L7 load balancer.
# Migrated from extensions/v1beta1 (removed in Kubernetes 1.22) to
# networking.k8s.io/v1, whose backend schema is service.name/port.number.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: crawler-ingress
  annotations:
    kubernetes.io/ingress.class: "gce" # we use Google's load balancer for ingress traffic
spec:
  # access thru ip address (default backend for unmatched requests)
  defaultBackend:
    service:
      name: crawler-web
      port:
        number: 8000
  # access thru domain name
  rules:
    - host: crawler.algolia.com
      http:
        paths:
          - path: /*
            # GCE ingress uses its own wildcard matching; "/*" is not a valid
            # v1 Prefix path, so ImplementationSpecific is required here
            pathType: ImplementationSpecific
            backend:
              service:
                name: crawler-web
                port:
                  number: 8000
#!/usr/bin/env bash
# This script runs a proxy to give access to our CloudSQL database.
# While it's running, any postgres client (e.g. psql) can connect thru 127.0.0.1:5432.
# Expected inputs:
# - CLOUDSQL_INSTANCE_NAME: e.g. "crawler-pgsql"
# - PGSQL_CREDS_PATH: path where your service account JSON file is stored, e.g. "$HOME/crawler/secrets"
# - PGSQL_CREDS_FILENAME: name of your service account JSON file, e.g. "crawler-pgsql-service-account.json"
set -euo pipefail

# ${VAR:-} keeps the emptiness checks working under `set -u` when a var is unset.
if [ -z "${CLOUDSQL_INSTANCE_NAME:-}" ]; then
  echo "missing env var: CLOUDSQL_INSTANCE_NAME (e.g. \"crawler-pgsql\")"; exit 1; fi;
if [ -z "${PGSQL_CREDS_PATH:-}" ]; then
  echo "missing env var: PGSQL_CREDS_PATH (e.g. \"$HOME/crawler/secrets\")"; exit 1; fi;
if [ -z "${PGSQL_CREDS_FILENAME:-}" ]; then
  echo "missing env var: PGSQL_CREDS_FILENAME (e.g. \"crawler-pgsql-service-account.json\")"; exit 1; fi;

GOOGLE_PROJECT="crawler"
ZONE="us-central1"
# CloudSQL connection names are <project>:<region>:<instance>
CLOUDSQL_INSTANCE="$GOOGLE_PROJECT:$ZONE:$CLOUDSQL_INSTANCE_NAME"

echo "Connecting to $CLOUDSQL_INSTANCE thru CloudSQL proxy on 127.0.0.1"
echo "Filepath to service account file: $PGSQL_CREDS_PATH/$PGSQL_CREDS_FILENAME"

# Quote all variable expansions so paths containing spaces don't split into
# separate docker arguments.
docker run -v "$PGSQL_CREDS_PATH":/conf -p 127.0.0.1:5432:5432 \
  gcr.io/cloudsql-docker/gce-proxy:1.11 /cloud_sql_proxy \
  -credential_file="/conf/$PGSQL_CREDS_FILENAME" \
  -instances="$CLOUDSQL_INSTANCE"=tcp:0.0.0.0:5432
@adrienjoly
Copy link
Author

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment