Skip to content

Instantly share code, notes, and snippets.

@bradfitz
Created May 18, 2021 22:56
Show Gist options
  • Save bradfitz/2815752fac272900d2e1ecda6cc50f41 to your computer and use it in GitHub Desktop.
// homerunner is Brad's shitty Docker wrapper after he got tired of running
// HA nine-VM Kubernetes clusters. Earlier versions of this tried to use podman
// and fancy cloud-init and CNI stuff but then I decided to go to the other
// extreme and write something super specific to what I need and super dumb:
// run my containers from gcr.io, and use my home Ceph cluster for mounts/state.
//
// This primarily runs Home Assistant, HomeSeer, an MQTT server, and some cameras.
// And some omitted misc stuff.
package main
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"strings"
)
// container describes one Docker container this tool manages: the image
// to run, an optional dedicated IP on the br0 bridge, host/RBD volume
// mounts, and the ports to publish on that IP.
type container struct {
	name   string   // docker container name (passed as --name)
	image  string   // image reference, e.g. "gcr.io/bradfitz-proj/homeseer"
	args   []string // optional args to add
	ip     string   // host IP the ports are published on; added to br0 by addIP
	mounts []mount  // host-side volumes to prepare and bind in
	ports  []port   // to expose
}
// mount describes one volume for a container. The host-side source is
// either a Ceph RBD image (rbdPool+rbdImage, mounted by addMount under
// /mnt/rbd-<pool>-<image>) or a plain host path — not both.
type mount struct {
	// Either RBD fields must be set,
	rbdPool  string
	rbdImage string
	// Or host:
	host      string
	container string // mount point inside the container
}
// getHostPath returns the host-side directory backing the mount: the
// explicit host path when one is set, otherwise the conventional
// /mnt/rbd-<pool>-<image> directory used for RBD mounts. It panics when
// neither form is fully populated, since that is a programmer error in
// the containers table.
func (m mount) getHostPath() string {
	if m.host != "" {
		return m.host
	}
	switch {
	case m.rbdPool == "":
		panic("empty rbdPool")
	case m.rbdImage == "":
		panic("empty rbdImage")
	}
	return fmt.Sprintf("/mnt/rbd-%s-%s", m.rbdPool, m.rbdImage)
}
// port is one container port to publish on the container's IP; start
// maps it 1:1 (same port number on host and container).
type port struct {
	proto string // "tcp" or "udp"
	port  uint16
}
var (
	// PortHTTP is plain HTTP on TCP 80, shared by several containers below.
	PortHTTP = port{"tcp", 80}
)
// containers is the full set of containers main tries to bring up on
// this machine. Order matters only for log readability; each entry is
// started independently and a failure doesn't stop the others.
var containers = []*container{
	// ....
	// ....
	{
		name:  "homeseer",
		image: "gcr.io/bradfitz-proj/homeseer",
		ip:    "10.0.1.62",
		mounts: []mount{
			{
				rbdPool:   "ssd2hdd1",
				rbdImage:  "homeseer",
				container: "/HomeSeer",
			},
		},
		ports: []port{PortHTTP},
	},
	{
		name:  "mosquitto",
		image: "eclipse-mosquitto:1.6.2",
		ip:    "10.0.1.71",
		ports: []port{{"tcp", 1883}}, // MQTT
	},
	{
		name:  "roofwest",
		image: "gcr.io/bradfitz-proj/ubnt-still",
		ports: []port{PortHTTP},
		ip:    "10.0.1.100",
	},
	{
		name:  "captureroof",
		image: "gcr.io/bradfitz-proj/captureroof",
		ip:    "10.0.1.4",
		args:  []string{"--prod"},
		ports: []port{PortHTTP},
		mounts: []mount{
			{
				host:      "/mnt/secret/gcpkey",
				container: "/mnt/secret/gcpkey",
			},
			{
				rbdPool:   "ssd2hdd1",
				rbdImage:  "captureroof",
				container: "/mnt/ceph",
			},
		},
	},
}
// main converges the machine toward the desired state: verify the OS is
// Ubuntu, make sure Ceph client tooling/config is present, then start
// every container that isn't already running. Exits non-zero if any
// container failed to start.
func main() {
	assertUbuntu()
	initCeph()
	fmt.Println("ok")
	failed := false
	for _, c := range containers {
		err := c.start()
		if err == nil {
			continue
		}
		log.Printf("starting %v: %v", c.name, err)
		failed = true
	}
	if failed {
		os.Exit(1)
	}
}
// start launches the container with "docker run -d" unless it is
// already running. Before running docker it binds the container's IP to
// br0 and prepares the host side of every mount, then assembles the
// full docker argument list (port publishes, volumes, image, extra
// args) and executes it.
func (c *container) start() error {
	if containerRunning(c.name) {
		log.Printf("%s already running.", c.name)
		return nil
	}
	if err := addIP(c.ip); err != nil {
		return err
	}
	for _, m := range c.mounts {
		if err := addMount(m); err != nil {
			return fmt.Errorf("setting up mount %+v: %w", m, err)
		}
	}
	// Build the complete argument list before creating the command.
	args := []string{
		"run",
		"-d",
		//"--rm",
		"--name=" + c.name,
		"--env=TZ=US/Pacific",
	}
	for _, p := range c.ports {
		args = append(args, "-p", fmt.Sprintf("%s:%v:%v/%s", c.ip, p.port, p.port, p.proto))
	}
	for _, m := range c.mounts {
		args = append(args, "-v", fmt.Sprintf("%s:%s", m.getHostPath(), m.container))
	}
	args = append(args, c.image)
	args = append(args, c.args...)
	cmd := exec.Command("docker", args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("running docker %q: %v, %s", cmd.Args, err, out)
	}
	log.Printf("started %v: %s: %q", c.name, cmd.Args, bytes.TrimSpace(out))
	return nil
}
// addMount prepares the host side of mount m so docker can bind it into
// the container. Host-path mounts must already exist (they are only
// stat'ed, never created). RBD mounts get a /mnt/rbd-<pool>-<image>
// directory, are skipped when /proc/mounts shows them already mounted,
// and are otherwise mapped via rbd-nbd and mounted there.
func addMount(m mount) error {
	if m.host != "" {
		if _, err := os.Stat(m.host); err != nil {
			return err
		}
		return nil
	}
	if m.rbdPool == "" || m.rbdImage == "" {
		return errors.New("required RBD fields not populated")
	}
	if m.container == "" {
		return errors.New("container field not populated")
	}
	hostDir := fmt.Sprintf("/mnt/rbd-%s-%s", m.rbdPool, m.rbdImage)
	mountsB, err := ioutil.ReadFile("/proc/mounts")
	if err != nil {
		return err
	}
	// /proc/mounts fields are space-separated; look for the mount point
	// surrounded by spaces to avoid prefix matches.
	if mounts := string(mountsB); strings.Contains(mounts, " "+hostDir+" ") {
		log.Printf("%s already mounted", hostDir)
		return nil
	}
	if err := os.MkdirAll(hostDir, 0755); err != nil {
		// Bug fix: this previously returned nil, silently swallowing a
		// mkdir failure and deferring the error to mount(8) below.
		return err
	}
	dev, err := getRBDNBDDev(m.rbdPool, m.rbdImage)
	if err != nil {
		return err
	}
	out, err := exec.Command("mount", dev, hostDir).CombinedOutput()
	if err != nil {
		return fmt.Errorf("running mount %s %s: %v, %s", dev, hostDir, err, out)
	}
	return nil
}
// getRBDNBDDev returns the /dev/nbdN device for pool/image. It reuses
// an existing rbd-nbd mapping when "rbd-nbd list-mapped" reports one,
// and otherwise creates a fresh mapping with "rbd-nbd map", returning
// the device path that command prints.
func getRBDNBDDev(pool, image string) (nbdDev string, err error) {
	type mapping struct {
		Pool   string `json:"pool"`
		Image  string `json:"image"`
		Device string `json:"device"` // "/dev/nbd0"
	}
	listJSON, err := exec.Command("rbd-nbd", "--format=json", "--pretty-format", "list-mapped").Output()
	if err != nil {
		return "", err
	}
	var mapped []mapping
	if err := json.Unmarshal(listJSON, &mapped); err != nil {
		return "", err
	}
	for _, mm := range mapped {
		if mm.Pool != pool || mm.Image != image {
			continue
		}
		if mm.Device == "" {
			return "", errors.New("unexpected empty Device in mapped output")
		}
		return mm.Device, nil
	}
	// Not mapped yet; map it now. On success rbd-nbd prints the device.
	out, err := exec.Command("rbd-nbd", "map", pool+"/"+image).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("running rbd-map: %v, %s", err, bytes.TrimSpace(out))
	}
	return strings.TrimSpace(string(out)), nil
}
// addIP assigns ip/16 to the br0 bridge so container ports can be
// published on it. An empty ip is a no-op, and the kernel's
// "File exists" reply (address already configured) counts as success.
func addIP(ip string) error {
	if ip == "" {
		return nil
	}
	out, err := exec.Command("ip", "addr", "add", ip+"/16", "dev", "br0").CombinedOutput()
	if err == nil {
		return nil
	}
	if strings.TrimSpace(string(out)) == "RTNETLINK answers: File exists" {
		// Address already present on br0; treat as success.
		return nil
	}
	return fmt.Errorf("ip addr add %s/16 dev br0: %v, %s", ip, err, out)
}
// containerRunning reports whether a docker container with the given
// name is currently in the "running" state, according to "docker ps".
func containerRunning(name string) bool {
	out, err := exec.Command("docker", "ps", "--format={{json .}}").Output()
	if err != nil {
		log.Fatalf("running docker ps: %v", err)
	}
	type Row struct {
		Names string `json:"Names"`
		State string `json:"State"`
	}
	// docker ps emits one JSON object per line; decode them in sequence.
	d := json.NewDecoder(bytes.NewReader(out))
	for {
		var r Row
		if err := d.Decode(&r); err != nil {
			if err != io.EOF {
				// Bug fix: a real decode error used to be silently
				// treated as "not running"; die loudly instead, matching
				// the handling of a docker ps failure above.
				log.Fatalf("parsing docker ps output: %v", err)
			}
			return false
		}
		log.Printf("Row: %+v", r)
		if r.Names == name && r.State == "running" {
			return true
		}
	}
}
// assertUbuntu exits the program unless /etc/os-release identifies the
// host as Ubuntu; the rest of the tool assumes an Ubuntu host (e.g.
// initCeph uses apt-get).
func assertUbuntu() {
	v, err := ioutil.ReadFile("/etc/os-release")
	if err != nil {
		log.Fatal(err)
	}
	if !strings.Contains(string(v), `NAME="Ubuntu"`) {
		// Typo fix: message previously said "homerunniner".
		log.Fatalf("homerunner assumes Ubuntu for now; got %s", v)
	}
}
// mustRun executes prog with args and exits the program on any failure,
// logging the command's combined output for diagnosis.
func mustRun(prog string, args ...string) {
	out, err := exec.Command(prog, args...).CombinedOutput()
	if err == nil {
		return
	}
	log.Fatalf("Error running %s %v: %v, %s\n", prog, args, err, out)
}
// initCeph ensures the Ceph client side is ready: installs rbd-nbd and
// ceph-common if the rbd-nbd binary is missing, verifies both tools are
// now on PATH, writes /etc/ceph/ceph.conf from the embedded template,
// and warns (without failing) if the admin keyring isn't present.
func initCeph() {
	if _, err := exec.LookPath("rbd-nbd"); err != nil {
		mustRun("apt-get", "install", "-y", "rbd-nbd", "ceph-common")
	}
	for _, bin := range []string{"rbd-nbd", "rbd"} {
		if _, err := exec.LookPath(bin); err != nil {
			log.Fatal(err)
		}
	}
	if err := os.MkdirAll("/etc/ceph", 0700); err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("/etc/ceph/ceph.conf", []byte(cephConf), 0644); err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stat("/etc/ceph/ceph.client.admin.keyring"); err != nil {
		// Non-fatal: mounts will fail later, but log the hint now.
		log.Printf("checking ceph keyring: %v", err)
	}
}
// cephConf is the Ceph client configuration that initCeph writes to
// /etc/ceph/ceph.conf. The fsid is redacted ("XXXX") for publication.
// NOTE(review): [mds.ss3] spells "mds standby for name" with spaces
// while the other sections use underscores; Ceph is documented to treat
// spaces and underscores in option names interchangeably, so this is
// presumably equivalent — confirm before "fixing".
const cephConf = `
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster_network = 10.0.0.0/16
fsid = XXXX
mon_allow_pool_delete = true
mon_host = 10.0.0.91 10.0.0.92 10.0.0.93
osd_journal_size = 5120
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public_network = 10.0.0.0/16
[client]
keyring = /etc/ceph/ceph.client.admin.keyring
[mds]
keyring = /var/lib/ceph/mds/ceph-$id/keyring
[mds.ss2]
host = ss2
mds_standby_for_name = pve
[mds.ss1]
host = ss1
mds_standby_for_name = pve
[mds.ss3]
host = ss3
mds standby for name = pve
[mon.ss2]
host = ss2
[mon.ss1]
host = ss1
[mon.ss3]
host = ss3
`
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment