DockerCon Recap - Demo Script
variable "access_key" {}
variable "secret_key" {}
variable "public_key_path" {
description = <<DESCRIPTION
Path to the SSH public key to be used for authentication.
Ensure this keypair is added to your local SSH agent so provisioners can
connect.
Example: ~/.ssh/terraform.pub
DESCRIPTION
}
variable "key_name" {
description = "Desired name of AWS key pair"
}
variable "region" {
default = "ap-southeast-1"
}
# This is the ubuntu 15.10 image for <region>, amd64, hvm:ebs-ssd
variable "amis" {
default = {
ap-southeast-1 = "ami-60975903"
ap-northeast-1 = "ami-3355505d"
ap-northeast-2 = "ami-e427e98a"
eu-central-1 = "ami-6da2ba01"
}
}
provider "aws" {
access_key = "${var.access_key}"
secret_key = "${var.secret_key}"
region = "${var.region}"
}
# Create a VPC to launch our instances into
resource "aws_vpc" "default" {
  cidr_block = "10.0.0.0/16"
}

# Create an internet gateway to give our subnet access to the outside world
resource "aws_internet_gateway" "default" {
  vpc_id = "${aws_vpc.default.id}"
}

# Grant the VPC internet access on its main route table
resource "aws_route" "internet_access" {
  route_table_id         = "${aws_vpc.default.main_route_table_id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.default.id}"
}

# Create a subnet to launch our instances into
resource "aws_subnet" "default" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "10.0.1.0/24"
  map_public_ip_on_launch = true
}
# A security group for the ELB so it is accessible via the web
resource "aws_security_group" "elb" {
  name        = "ecs_sample_elb"
  description = "Used by ecs sample"
  vpc_id      = "${aws_vpc.default.id}"

  # HTTP access from anywhere
  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # outbound internet access
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Our default security group to access
# the instances over SSH and HTTP
resource "aws_security_group" "default" {
  name        = "ecs_example"
  description = "Used by ecs sample"
  vpc_id      = "${aws_vpc.default.id}"

  # SSH access from anywhere
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # ANY inbound within VPC (for demo purposes only!)
  ingress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["10.0.0.0/16"]
  }

  # ANY outbound within VPC (for demo purposes only!)
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["10.0.0.0/16"]
  }

  # Ideally, only the following ports are required:
  # TCP port 2377 for cluster management communications
  # TCP and UDP port 7946 for communication among nodes
  # TCP and UDP port 4789 for overlay network traffic

  # outbound internet access
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # 8080 is load balanced across cluster nodes
  ingress {
    from_port   = 8080
    to_port     = 8080
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # 3000 node visualizer
  ingress {
    from_port   = 3000
    to_port     = 3000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
resource "aws_elb" "web" {
name = "ecs-sample-elb"
subnets = ["${aws_subnet.default.id}"]
security_groups = ["${aws_security_group.elb.id}"]
# The same availability zone as our instances
# availability_zones = ["${aws_instance.node.0.availability_zone}"]
#magically register all instances:
instances = ["${aws_instance.node.*.id}"]
listener {
instance_port = 8080
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8080/"
interval = 30
}
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
resource "aws_lb_cookie_stickiness_policy" "default" {
name = "lbpolicy"
load_balancer = "${aws_elb.web.id}"
lb_port = 80
cookie_expiration_period = 600
}
resource "aws_key_pair" "auth" {
key_name = "${var.key_name}"
public_key = "${file(var.public_key_path)}"
}
resource "aws_instance" "node" {
# This will create 3 instances
count = 3
# The connection block tells our provisioner how to
# communicate with the resource (instance)
connection {
# The default username for our AMI
user = "ubuntu"
# The connection will use the local SSH agent for authentication.
}
instance_type = "t2.micro"
# Lookup the correct AMI based on the region
# we specified
ami = "${lookup(var.amis, var.region)}"
# The name of our SSH keypair we created above.
key_name = "${aws_key_pair.auth.id}"
# Our Security group to allow HTTP and SSH access
vpc_security_group_ids = ["${aws_security_group.default.id}"]
# We're going to launch into the same subnet as our ELB. In a production
# environment it's more common to have a separate private subnet for
# backend instances.
subnet_id = "${aws_subnet.default.id}"
# We provision docker 1.12 RC:
/*
provisioner "remote-exec" {
inline = [
"curl -sSL https://test.docker.com/ | sh"
]
}
*/
provisioner "remote-exec" {
inline = [
"curl -sSL https://experimental.docker.com/ | sh"
]
}
}
output "address" {
value = "${aws_elb.web.dns_name}"
}
output "nodes" {
value = "${join(",", aws_instance.node.*.public_ip)}"
}
version: '2'
services:
  app:
    image: so0k/ecs-sample:1.0.0
    entrypoint: /app/ecs-sample
    env_file: .env
    networks:
      front-tier:
        aliases:
          - apps
      back-tier:
    ports:
      - "8080:80"
  mongo:
    image: mongo:3.2.6
    container_name: mongo
    volumes:
      - mongodata:/data/db
      - mongoconfig:/data/configdb
    networks:
      - back-tier
  redis:
    image: redis:3.2.0-alpine
    container_name: redis
    networks:
      - back-tier
volumes:
  mongodata:
  mongoconfig:
networks:
  back-tier:
  front-tier:
version: '2'
services:
  lb:
    container_name: lb
    build: lb
    restart: unless-stopped
    ports:
      - "80:80"
    networks:
      - front-tier
  app:
    image: so0k/ecs-sample:1.0.0
    entrypoint: /app/ecs-sample
    env_file: .env
    networks:
      front-tier:
        aliases:
          - apps
      back-tier:
    expose:
      - 80
    depends_on:
      - lb
  mongo:
    image: mongo:3.2.6
    container_name: mongo
    volumes:
      - mongodata:/data/db
      - mongoconfig:/data/configdb
    networks:
      - back-tier
  redis:
    image: redis:3.2.0-alpine
    container_name: redis
    networks:
      - back-tier
volumes:
  mongodata:
  mongoconfig:
networks:
  back-tier:
  front-tier:

This is a work in progress - an update to the so0k/ecs-sample repository

Slides from meetup

Overview

In this sample repo we will use Docker to quickly and easily get started with, and scale, a real-time messaging app written in Go.

Screenshot of app

As a reference, a very lightweight app from the Toptal article Going Real-Time with Redis Pub/Sub was used. The code has been slightly modified to better demonstrate Docker-related concepts and is fully included in this repository.

Changes to the original application:

  • Added additional logging
  • Added an indicator showing which application container is serving the client
  • Added exponential back-off as a best practice for applications running in the cloud.

The application allows users to upload images and see real-time comments on those images. Clicking an image will show, for every user, indicators where the image was clicked. All this functionality was written by the Toptal developer.

To implement the above functionality, the following stack will be used:

  • AWS S3: To store the user-uploaded images.
  • MongoDB: As a Document Oriented Database keeping track of images stored on S3 and the comments of users.
  • Redis Pub/Sub: Redis as a Publish/Subscribe messaging system to propagate real-time updates.
  • App: the Go application that serves the webpage and manages the websockets with client browsers.
  • Nginx: As a load balancer to easily scale the application horizontally.

Note: Nginx is used as a load balancer while running the full stack locally; in a production environment, a more robust load balancer setup should be considered.

Note: IAM permissions for EC2, ECS, and CloudFormation are required to follow along with this guide.

Development environment:

  • Tested on OSX El Capitan with Bash
  • Install Docker For Mac
  • Ensure you have a working AWS Account (we will do sample S3 setup as part of these instructions)
  • Download jq to easily work with AWS resources from the CLI

Setting up S3

Install the AWS CLI (you will need access keys to use the CLI):

pip install awscli
aws configure

Ensure jq is working properly:

jq --version

Expected Output (similar to this):

jq-1.5

Note this should be put into Terraform!

Create an IAM user to give the application S3 access (don't use the root account):

aws iam create-user --user-name sample_app

Create Access Key and save to .env file:

aws iam create-access-key --user-name sample_app | jq -r '"AWS_ACCESS_KEY_ID=\(.AccessKey.AccessKeyId)","AWS_SECRET_ACCESS_KEY=\(.AccessKey.SecretAccessKey)"' >> .env

Create the S3 bucket (you will need to adjust the instructions to your bucket name; the samples here use ecs-sample):

aws s3 mb s3://ecs-sample --region ap-southeast-1

Add your S3 bucket name to your .env file:

echo "S3_BUCKET_NAME=ecs-sample" >> .env

Create Policy Document for S3 Bucket

cat - << EOF > SampleAppS3Policy.json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:*",
            "Resource": [
                "arn:aws:s3:::ecs-sample/",
                "arn:aws:s3:::ecs-sample/*"
            ]
        }
    ]
}
EOF

Upload Policy Document

policy_arn=`aws iam create-policy --policy-name MyAppS3Access --description "Give S3 Access to ecs-sample bucket" --policy-document file://SampleAppS3Policy.json | jq -r '.Policy.Arn'`

Attach Policy Document to sample_app user

aws iam attach-user-policy --user-name sample_app --policy-arn $policy_arn
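
As an optional verification step (not part of the original walkthrough), you can confirm the policy is attached by listing the user's attached policies; the output should include MyAppS3Access:

aws iam list-attached-user-policies --user-name sample_app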

Getting to know Docker for Mac

Once Docker for Mac has been installed and is running, confirm everything is installed correctly:

docker version

Expected output (similar to this)

Client:
 Version:      1.11.2
 API version:  1.23
 Go version:   go1.5.4
 Git commit:   b9f10c9
 Built:        Wed Jun  1 21:20:08 2016
 OS/Arch:      darwin/amd64

Server:
 Version:      1.11.2
 API version:  1.23
 Go version:   go1.5.4
 Git commit:   56888bf
 Built:        Mon Jun  6 23:57:32 2016
 OS/Arch:      linux/amd64

docker-compose version

Expected output (similar to this)

docker-compose version 1.7.1, build 0a9ab35
docker-py version: 1.8.1
CPython version: 2.7.9
OpenSSL version: OpenSSL 1.0.1j 15 Oct 2014

docker "Hello world!"

docker run hello-world

Expected output:

...
a9d36faac0fe: Pull complete
Digest: sha256:e52be8ffeeb1f374f440893189cd32f44cb166650e7ab185fa7735b7dc48d619
Status: Downloaded newer image for hello-world:latest

Hello from Docker.
This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:
 1. The Docker client contacted the Docker daemon.
 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
 3. The Docker daemon created a new container from that image which runs the
    executable that produces the output you are currently reading.
 4. The Docker daemon streamed that output to the Docker client, which sent it
    to your terminal.

To try something more ambitious, you can run an Ubuntu container with:
 $ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker Hub account:
 https://hub.docker.com

For more examples and ideas, visit:
 https://docs.docker.com/engine/userguide/

At this point, you may read the userguide linked above. Concepts will be explained as they are encountered in this guide as well.

Getting to know Redis

Once Docker is installed, you will never have to install packages on your machine to play with interesting technology again. You can simply run the service in a container and remove every trace of it when done.

First, let's spin up a daemonized (-d) Redis container (named redis-test):

docker run -d --name redis-test redis:3.2.0-alpine

Verify the container is running:

docker ps

To play with this Redis server, we need redis-cli, but we do not need to install it on our machine since redis-cli is bundled in the redis image!

Get a shell (-it) in a 2nd redis container linked (--link) to the first:

docker run -it --rm --link redis-test redis:3.2.0-alpine /bin/sh

From within this container, connect to the Redis server (-h redis-test):

redis-cli -h redis-test

Test Redis

SET lives 9
INCR lives
GET lives

Let's play with Pub/Sub features of Redis:

SUBSCRIBE channel

Launch a second client container (re-use the exact same docker run command from above in a separate terminal) and connect again:

redis-cli -h redis-test

Publish data from 2nd container to channel

PUBLISH channel "hello from container2"

You should see the message broadcast to all subscribed clients. Notice that once a connection is in subscribe mode, it can no longer be used to send messages. To both send and receive, two separate connections to the Redis server are required.
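
If you prefer not to keep two interactive shells open, roughly the same experiment can be run with one-shot containers. The commands below are a sketch that reuses the redis-test container and the channel name from above:

# terminal 1: subscribe (blocks and prints each message as it arrives)
docker run -it --rm --link redis-test redis:3.2.0-alpine redis-cli -h redis-test SUBSCRIBE channel

# terminal 2: publish; the reply is the number of subscribers that received the message
docker run -it --rm --link redis-test redis:3.2.0-alpine redis-cli -h redis-test PUBLISH channel "hello again"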

Getting to know MongoDB

Very similar to the Redis experiment above, quickly launch a MongoDB server:

Launch a daemonized Mongo container:

docker run -d --name mongo-test mongo:3.2.6

Launch a container to play with the mongo shell:

docker run -it --rm --link mongo-test mongo:3.2.6 /bin/bash

Connect via Mongo shell

mongo mongo-test:27017

Insert sample documents

db.messages.insert(
{
    "message": "hello",
    "sender": "me"
})

db.messages.insert(
{
    "message": "world",
    "sender":"you"
})

Select sample messages

db.messages.find()

Select sample messages with a conditions document

db.messages.find( { "sender": "you" })

Create an ascending index on sender field of the messages collection

db.messages.createIndex({"sender": 1})

Playing with the full application stack

The application is written in Go. All dependencies have been vendored in with Godeps. However, to play with the application, Go does not have to be installed locally; everything is handled through Docker.

Let's first clean up the redis-test and mongo-test containers:

docker stop redis-test mongo-test && docker rm redis-test mongo-test

Clone the application:

git clone https://github.com/so0k/ecs-sample.git

Note: if you later want to develop the application further, setting up a local Go environment and cloning the repository under the correct import path is still straightforward, as sketched below.
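
A minimal sketch of that local setup, assuming a working Go installation and a configured GOPATH:

mkdir -p $GOPATH/src/github.com/so0k
git clone https://github.com/so0k/ecs-sample.git $GOPATH/src/github.com/so0k/ecs-sample
cd $GOPATH/src/github.com/so0k/ecs-sample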

Build the application (using Docker)

make

Note: This makefile is inspired by Nicola Paolucci's article.

The full application stack is defined in a declarative docker-compose.yaml file at the root of this repository.

The environment configuration for our application is stored in the .env file we have been building incrementally in the setup steps above. Docker Compose passes all these parameters from the .env file to our application as environment variables.

Two parameters are still missing, add these as follows:

echo "MONGO_URL=mongodb://mongo/ecs-sample" >> .env
echo "REDIS_URL=redis://redis" >> .env

Note: We have defined the MongoDB hostname as mongo and the Redis hostname as redis in the docker-compose file.
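
At this point the .env file should contain roughly the following entries (the AWS values below are placeholders; yours were generated during the S3 setup steps above):

AWS_ACCESS_KEY_ID=AKIA...
AWS_SECRET_ACCESS_KEY=...
S3_BUCKET_NAME=ecs-sample
MONGO_URL=mongodb://mongo/ecs-sample
REDIS_URL=redis://redis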

Let's watch running containers with the following command in a separate terminal:

watch -n 1 "docker ps --format='table{{.Image}}\t{{.Names}}\t{{.Status}}\t{{.Ports}}\t{{.ID}}'"

We are now ready to stand up the application stack:

docker-compose up -d

Once all containers are running, you should be able to open http://localhost (port 80) in your browser.
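
A quick command-line smoke test (just a sketch, assuming the stack is published on port 80 as in the compose file; it should print 200):

curl -s -o /dev/null -w "%{http_code}\n" http://localhost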

Try to scale the application:

docker-compose scale app=5

Opening multiple browsers should demonstrate that client sessions are load balanced across separate application containers behind the load balancer and that all real-time events are propagated across the cluster.
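
Because extra logging and a serving-container indicator were added to the app, you can also watch which container handles each request from the compose logs (a sketch; app is the service name from docker-compose.yaml):

docker-compose ps app
docker-compose logs -f app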

Get a shell on one of the running application containers:

docker exec -it ecssample_app_1 /bin/sh

Review the DNS records published by the Docker Engine:

nslookup apps

We can ping the mongo and redis hosts from the app container:

ping mongo
ping redis

Our mongo and redis containers are isolated from the load balancer:

Get a shell on the running load balancer.

docker exec -it lb /bin/sh

We can ping apps:

ping apps

We cannot ping the mongo or redis hosts:

ping mongo
ping redis

Clean up

docker-compose stop
docker-compose rm

Notes:

  • The application uses DNS round robin for the nginx load balancer to test scaling locally
  • A so0k/ecs-sample:dev container is available to play with the application source code

Run the Dev container as follows:

docker run -it --name ecs-sample-dev -v $PWD:/go/src/github.com/so0k/ecs-sample so0k/ecs-sample:dev

Understanding Container Distribution

The container images built as part of this demonstration are currently only available on our host. An important component of the container ecosystem is the ability to ship these images to different hosts.

Similar to the concept of Software Repositories, container image repositories are designed for the purpose of delivering container images to hosts. In the Docker ecosystem, distribution is managed through a Registry service. Several Registry service offerings are available:

  1. Docker Hub: This is the public Registry provided by Docker and the default registry used by every Docker client. The mongo, redis and nginx images we ran earlier were all obtained from the Docker Hub. Public Repositories on the Docker Hub are free (Similar to Public Repositories on GitHub).

  2. Self-Hosted Registry: An open source version of the Registry is available on GitHub. This allows anyone to host Docker Images privately, with the overhead of configuring and maintaining the service.

  3. Amazon ECR: Amazon ECR is integrated with Amazon ECS and eliminates the need to operate your own container repositories or worry about scaling the underlying infrastructure. Integration with IAM provides resource-level control of each repository. You pay only for the amount of data you store in your repositories and data transferred to the internet.

For this sample application, a public repository on the Docker Hub was used, following these steps:

  1. Create a Docker Hub account by signing up. Similar to GitHub, credentials are not required to pull public images; however, they are required to push images.

  2. Provide your docker client with your account credentials:

    docker login
  3. Review the repository and image name in the Makefile provided with this repository (change to match your Docker Hub account and rebuild image if needed)

  4. You may use the make push target to tag and push the container image to the Docker Hub:

    make push

Note: changing the repository and image name in the Makefile will also require you to revise docker-compose.yaml. The changes required to this file are not covered in the current version of this guide; PRs are welcome.
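
If you do rename the image, one quick way to update the compose file in place (a sketch using the BSD sed shipped with OSX; replace <your-hub-user> with your Docker Hub account) is:

sed -i '' 's|so0k/ecs-sample|<your-hub-user>/ecs-sample|g' docker-compose.yaml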

Docker 1.12 Introduction

Using Docker For Mac:

docker swarm init
docker service create ...

Deploying 3 node cluster on EC2

Provision the AWS infrastructure using Terraform

Create a terraform.tfvars file at the root of the directory with your AWS keys.

Sample file:

access_key = "<SAMPLE>"
secret_key = "<SAMPLE>"

public_key_path = "~/.ssh/id_rsa.pub"
key_name = "ecs-sample"

Review the infrastructure defined in docker-cluster.tf of this repository:

terraform plan

Create the cluster on AWS:

terraform apply

After it completes, it will print a comma-separated list of the node IPs.

You may extract this list again from the local terraform state as follows:

terraform output nodes

Create the Docker 1.12 Cluster

SSH to the nodes and let the engines form a swarm:

ssh ubuntu@<first-node-ip>
sudo -i
docker swarm init

Note the private IP of the leader node (see the sketch below for one way to look it up).
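
One way to look up that private IP from the instance itself is the EC2 instance metadata service (this lookup is an addition to the original steps):

curl -s http://169.254.169.254/latest/meta-data/local-ipv4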

Run the node visualizer:

docker run -it -d -p 3000:3000 -e HOST=<pub-ip> -e PORT=3000 -v /var/run/docker.sock:/var/run/docker.sock manomarks/visualizer

Join the other nodes to the swarm:

ssh ubuntu@<other-nodes>
sudo docker swarm join <leader-priv-ip>:2377

Now, from the swarm leader:

docker node ls

Play with Docker Service concept

From the master node:

docker service create --replicas 1 --name helloworld alpine ping docker.com
docker service ls
docker service inspect --pretty helloworld
docker service scale helloworld=5

To see which nodes are running the tasks:

docker service tasks helloworld

To delete this service

docker service rm helloworld

See also:

  • docker service create --mode=global: services required on every node
  • docker service create --constraint com.example.storage=ssd: assumes docker daemon --label com.example.storage=ssd
  • Bring a node down for maintenance (see the sketch below)
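
For the last point, a sketch of draining a node for maintenance and bringing it back (run from a manager node; <node-name> as shown by docker node ls):

docker node update --availability drain <node-name>
docker node ls
docker node update --availability active <node-name>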

Create Distributed Application Bundle (DAB)

See docker/experimental/dab

docker-compose bundle

Note: the .env file is stored within the bundle (this may expose certain secrets). Ideally we'd use instance roles in our Terraform plan for the EC2 instances instead of bundling the S3 access keys.

Note: docker-compose 1.8 creates a .dsb file; just rename it to .dab.
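
To confirm what ends up inside the bundle (including the embedded environment values mentioned above), you can inspect it with jq. This is a sketch assuming the project directory is named ecssample, so the generated file is ecssample.dsb:

mv ecssample.dsb ecssample.dab
jq . ecssample.dab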

Next, on the cluster, deploy the app:

docker deploy ecssample

List the stack tasks:

docker stack tasks ecssample
...

Work in progress: Scale the app, expose the app through the ELB provisioned by Terraform...

REPOSITORY=so0k
IMAGE=ecs-sample
VERSION=1.0.0

default: builddocker

unixsetup:
	go get github.com/tools/godep
	go get golang.org/x/sys/unix

buildgo:
	CGO_ENABLED=0 GOOS=linux go build -ldflags "-s" -a -installsuffix cgo -o /ecs-sample ./cmd/ecs-sample

builddocker:
	docker build -t ${REPOSITORY}/${IMAGE}:dev -f ./dev.Dockerfile .
	docker run -t ${REPOSITORY}/${IMAGE}:dev /bin/true
	docker cp `docker ps -q -n=1`:/ecs-sample .
	docker rm `docker ps -q -n=1`
	chmod 755 ./ecs-sample
	docker build --rm=true --tag=${REPOSITORY}/${IMAGE}:${VERSION} -f alpine.Dockerfile .

push:
	docker push ${REPOSITORY}/${IMAGE}:${VERSION}

up: builddocker