Setup
bin/kafka-topics.sh \
--zookeeper zookeeper.example.com:2181 \
--create \
import sys

class MalFriendInterface:
    """Base interface pairing a friend name with a count.

    `mallik()` is a deliberate no-op here; concrete behavior is expected to
    come from subclasses (e.g. the MalFriendExecutor declared below this
    fragment).
    """

    def __init__(self, MallikCount, MalFriendName):
        # Stored verbatim; exact semantics of the count are not visible from
        # this fragment — TODO confirm against subclass usage.
        self.MallikCount = MallikCount
        self.MalFriendName = MalFriendName

    def mallik(self):
        # Intentionally empty: interface method for subclasses to override.
        pass
class MalFriendExecutor(MalFriendInterface): |
# The goal: create a list of maps of subnet mappings so we don't have to statically hard-code them in aws_lb
# https://www.terraform.io/docs/providers/aws/r/lb.html#subnet_mapping
locals {
  # These represent dynamic data we fetch from somewhere, such as subnet IDs and EIPs from a VPC module
  subnet_ids = ["subnet-1", "subnet-2", "subnet-3"]
  eips       = ["eip-1", "eip-2", "eip-3"]
}
# Here's the hack! The null_resource has a map called triggers that we can set to arbitrary values.
# We can also use count to create a list of null_resources. By accessing the triggers map inside of
FROM golang:1.8.3-alpine3.6

# Fetch the of-watchdog binary from GitHub, then remove curl again so the
# final image stays small. `-f` makes curl fail on HTTP errors instead of
# saving an HTML error page as the "binary".
RUN apk --no-cache add curl \
    && echo "Pulling watchdog binary from Github." \
    && curl -fsSL https://github.com/openfaas-incubator/of-watchdog/releases/download/0.2.2/of-watchdog > /usr/bin/fwatchdog \
    && chmod +x /usr/bin/fwatchdog \
    && apk del curl --no-cache

WORKDIR /go/src/handler
COPY . .
// CLI entry point: build a Converter from a JSON config file plus positional
// arguments. Usage: node <script> <config.json> <name> <description> <activate>
const fs = require('fs');
const Converter = require('./converter.js');

// argv[0] is node, argv[1] is the script path; user arguments start at argv[2].
const options = {
  name: process.argv[3],
  description: process.argv[4],
  activate: process.argv[5],
};

// argv[2] is the path to the JSON input file passed to the Converter.
const converter = new Converter(JSON.parse(fs.readFileSync(process.argv[2])), options);
# di_terraform.tf
# Example Terraform configuration that shows de-coupling resources (Amazon S3 bucket and bucket policy)
# with the help of modules analogous to dependency injection technique
# modules/s3_bucket/main.tf
# Base S3 bucket that can be attached a policy
variable "bucket_name" {}
variable "region" { |
#!/bin/bash
# Input: ./extract_kubecfg_cert.sh my-cluster-name username
# Output: ./my-cluster-name-ca.crt ./username.crt ./username.key
# Error helper: print a message and terminate the script
# abort MESSAGE — print MESSAGE and terminate the script with status 1.
abort(){
  # Quote "$1" so multi-word messages are not word-split/globbed, and send it
  # to stderr so callers can still capture stdout cleanly. `exit` is on its
  # own line (the original `echo … && exit 1` would skip the exit if echo
  # ever failed).
  echo "$1" >&2
  exit 1
}
# Prerequisites
# Bootstrap the control plane, pinning the cluster to Kubernetes v1.5.6.
kubeadm init --use-kubernetes-version=v1.5.6
S3 supports encryption at rest, but there isn't an option in S3 that automatically encrypts all files that are uploaded, so you need to specify encryption manually when you upload them.
I normally upload files to S3 from the CLI like so, using the --sse AES256
flag:
aws s3 cp ./your-local-file.txt \
    s3://your-bucket-name/your-local-file.txt \