@liemle3893
Created April 7, 2020 06:58
job "minio" {
datacenters = ["dc1"]
type = "service"
group "minio1" {
ephemeral_disk {
size = 10000
sticky = true
migrate = false
}
task "minio1" {
driver = "docker"
env {
MINIO_ACCESS_KEY = "minio"
MINIO_SECRET_KEY = "minio123"
}
user = "root"
config {
image = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
command = "server"
args = [
"http://minio{1...4}.service.consul:9000/data{1...2}"
]
dns_servers = ["${attr.unique.network.ip-address}"]
port_map {
http = 9000
}
privileged = true
mounts = [
# sample volume mount
{
type = "volume"
target = "/data1"
source = "data1-1"
readonly = false
},
{
type = "volume"
target = "/data2"
source = "data1-2"
readonly = false
},
]
}
service {
name = "minio1"
port = "http"
check {
name = "alive"
type = "http"
port = "http"
path = "/minio/health/live"
interval = "30s"
timeout = "20s"
initial_status = "passing"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
resources {
network {
port "http" {
static = 9000
}
}
cpu = 20
memory = 100
}
}
}
group "minio2" {
ephemeral_disk {
size = 10000
sticky = true
migrate = false
}
task "minio2" {
driver = "docker"
env {
MINIO_ACCESS_KEY = "minio"
MINIO_SECRET_KEY = "minio123"
}
user = "root"
config {
image = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
command = "server"
args = [
"http://minio{1...4}.service.consul:9000/data{1...2}"
]
dns_servers = ["${attr.unique.network.ip-address}"]
port_map {
http = 9000
}
privileged = true
mounts = [
# sample volume mount
{
type = "volume"
target = "/data1"
source = "data2-1"
readonly = false
},
{
type = "volume"
target = "/data2"
source = "data2-2"
readonly = false
},
]
}
service {
name = "minio2"
port = "http"
check {
name = "alive"
type = "http"
port = "http"
path = "/minio/health/live"
interval = "30s"
timeout = "20s"
initial_status = "passing"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
resources {
network {
port "http" {
static = 9000
}
}
cpu = 20
memory = 100
}
}
}
group "minio3" {
ephemeral_disk {
size = 10000
sticky = true
migrate = false
}
task "minio3" {
driver = "docker"
env {
MINIO_ACCESS_KEY = "minio"
MINIO_SECRET_KEY = "minio123"
}
user = "root"
config {
image = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
command = "server"
args = [
"http://minio{1...4}.service.consul:9000/data{1...2}"
]
dns_servers = ["${attr.unique.network.ip-address}"]
port_map {
http = 9000
}
privileged = true
mounts = [
# sample volume mount
{
type = "volume"
target = "/data1"
source = "data3-1"
readonly = false
},
{
type = "volume"
target = "/data2"
source = "data3-2"
readonly = false
},
]
}
service {
name = "minio3"
port = "http"
check {
name = "alive"
type = "http"
port = "http"
path = "/minio/health/live"
interval = "30s"
timeout = "20s"
initial_status = "passing"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
resources {
network {
port "http" {
static = 9000
}
}
cpu = 20
memory = 100
}
}
}
group "minio4" {
ephemeral_disk {
size = 10000
sticky = true
migrate = false
}
task "minio4" {
driver = "docker"
env {
MINIO_ACCESS_KEY = "minio"
MINIO_SECRET_KEY = "minio123"
}
user = "root"
config {
image = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
command = "server"
args = [
"http://minio{1...4}.service.consul:9000/data{1...2}"
]
dns_servers = ["${attr.unique.network.ip-address}"]
port_map {
http = 9000
}
privileged = true
mounts = [
# sample volume mount
{
type = "volume"
target = "/data1"
source = "data4-1"
readonly = false
},
{
type = "volume"
target = "/data2"
source = "data4-2"
readonly = false
},
]
}
service {
name = "minio4"
port = "http"
check {
name = "alive"
type = "http"
port = "http"
path = "/minio/health/live"
interval = "30s"
timeout = "20s"
initial_status = "passing"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
resources {
network {
port "http" {
static = 9000
}
}
cpu = 20
memory = 100
}
}
}
}
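
Note that every group binds static port 9000, so Nomad will place at most one of the four groups on any given client node; with fewer than four eligible clients the remaining groups simply fail to place. A distinct_hosts constraint at the job level makes that one-group-per-node requirement explicit. This is only a sketch and is not part of the original job:

  # Sketch only: add inside the job block to spread the four groups across four nodes.
  constraint {
    operator = "distinct_hosts"
    value    = "true"
  }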
@mrinalwahal commented May 6, 2021

Hey, were you able to execute this in Nomad? I'm also trying to set up distributed MinIO storage.

I ran this configuration of yours, and port 9000 of all 4 instances is colliding. How did you route them to a single point? Did you run an NGINX job in addition to these 4 jobs? If yes, could you please share the Nomad configuration file for that?
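
For reference, a common way to give the four instances a single entry point is a reverse proxy that balances across the Consul service names. Below is a minimal Nomad sketch of such an NGINX job, in the same Nomad-0.x syntax as the gist; the job name, nginx image tag, port, and resources are illustrative and are not taken from this gist:

job "minio-lb" {
  datacenters = ["dc1"]
  type        = "service"

  group "nginx" {
    task "nginx" {
      driver = "docker"

      config {
        image = "nginx:1.19"
        port_map {
          http = 80
        }
        volumes = [
          "local/minio.conf:/etc/nginx/conf.d/default.conf",
        ]
      }

      # Render the upstream list from Consul so it tracks the minio1..minio4 services.
      template {
        destination = "local/minio.conf"
        change_mode = "restart"
        data        = <<EOF
upstream minio_cluster {
{{- range service "minio1" }}
  server {{ .Address }}:{{ .Port }};
{{- end }}
{{- range service "minio2" }}
  server {{ .Address }}:{{ .Port }};
{{- end }}
{{- range service "minio3" }}
  server {{ .Address }}:{{ .Port }};
{{- end }}
{{- range service "minio4" }}
  server {{ .Address }}:{{ .Port }};
{{- end }}
}

server {
  listen 80;
  location / {
    proxy_set_header Host $http_host;
    proxy_pass http://minio_cluster;
  }
}
EOF
      }

      resources {
        network {
          port "http" {
            static = 80
          }
        }
        cpu    = 100
        memory = 64
      }
    }
  }
}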

@liemle3893 (Author) replied:

Hi there. It's been a while since I last checked my gist notifications.

In case you are still interested in using MinIO + S3, I would suggest SeaweedFS, which is easier to deploy, manage, and scale.

My setup is:

User -> MinIO Gateway -> SeaweedFS.
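
For context, the gateway leg of that setup can itself be a Nomad job running MinIO in S3 gateway mode in front of SeaweedFS's S3 API. The sketch below is not the author's actual job; the SeaweedFS endpoint name and port, the credentials, and the resources are assumptions:

job "minio-gateway" {
  datacenters = ["dc1"]
  type        = "service"

  group "gateway" {
    task "gateway" {
      driver = "docker"

      env {
        # Credentials the gateway serves to its own clients (and, by default,
        # also uses against the S3 backend). Placeholder values.
        MINIO_ACCESS_KEY = "minio"
        MINIO_SECRET_KEY = "minio123"
      }

      config {
        image   = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
        command = "gateway"
        # Assumed SeaweedFS S3 endpoint (weed s3 / filer -s3); adjust to your setup.
        args = [
          "s3",
          "http://seaweedfs-s3.service.consul:8333"
        ]
        dns_servers = ["${attr.unique.network.ip-address}"]
        port_map {
          http = 9000
        }
      }

      service {
        name = "minio-gateway"
        port = "http"
        check {
          name     = "alive"
          type     = "http"
          path     = "/minio/health/live"
          interval = "30s"
          timeout  = "5s"
        }
      }

      resources {
        network {
          port "http" {
            static = 9000
          }
        }
        cpu    = 200
        memory = 256
      }
    }
  }
}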

@langerma commented:

job "s3" {
  type        = "service"
  datacenters = ["somedc"]
  update {
    max_parallel      = 1
    progress_deadline = "10m"
  }

  group "minio" {
    count = 6

    volume "minio" {
      type      = "host"
      source    = "minio-data"
      read_only = false
    }

    network {
      port "minio" {
        static = 9000
      }
      port "console" {
        static = 9001
      }
      dns {
        servers = ["169.254.1.1"]
      }
    }

    task "minio" {
      driver = "docker"
      volume_mount {
        volume      = "minio"
        destination = "/export"
      }
      config {
        image = "minio/minio:RELEASE.2021-11-03T03-36-36Z"
        network_mode = "host"
        ports        = ["minio", "console"]
        force_pull   = true

        args = [
          "server",
          "--address",
          "${NOMAD_IP_minio}:9000",
          "--console-address",
          "${NOMAD_IP_console}:9001",
          "http://client{1...6}/export/data"
        ]
      }

      env {
        MINIO_ROOT_USER            = "xxxxxxxxx"
        MINIO_ROOT_PASSWORD        = "xxxxxxxx"
        MINIO_BROWSER_REDIRECT_URL = "http://console.minio.xxxxxxx.org"
      }

      service {
        name = "minio"
        tags = [
          "frontend",
          "minio",
          "traefik.enable=true",
          "traefik.http.routers.minio.rule=Host(`minio.xxxxxxxx.org`) || Host(`minio.service.consul`)",
          "traefik.http.services.minio.loadBalancer.sticky.cookie=true",
          "traefik.http.services.minio.loadBalancer.sticky.cookie.name=miniocooki",
          "traefik.frontend.entryPoints=http"
        ]
        port = "minio"
        check {
          type     = "http"
          path     = "/minio/health/live"
          port     = "minio"
          interval = "30s"
          timeout  = "2s"
        }
      }

      service {
        port = "console"
        name = "console"
        tags = [
          "console",
          "minio",
          "traefik.enable=true",
          "traefik.http.routers.minioconsole.rule=Host(`console.minio.xxxxxxxxxx.org`)",
          "traefik.frontend.entryPoints=http"
        ]
        check {
          type     = "http"
          path     = "/"
          port     = "console"
          interval = "30s"
          timeout  = "2s"
        }
      }

      resources {
        cpu    = 1000 # MHz
        memory = 4096 # MB
      }
    }
  }
}

I have been able to run MinIO like this.
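
Worth noting: the volume "minio" stanza in the job above refers to a host volume named "minio-data", which has to be declared in every Nomad client's configuration before the group can be placed there. A minimal sketch, with an assumed path:

# Nomad client configuration (e.g. /etc/nomad.d/client.hcl)
client {
  enabled = true

  # The path is illustrative; the directory must already exist on the node.
  host_volume "minio-data" {
    path      = "/opt/minio/data"
    read_only = false
  }
}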

@langerma commented Dec 2, 2021

@liemle3893 do you run SeaweedFS as Nomad jobs, or did you deploy it separately? In the first case, would you mind sharing your job file? :-) I'd be quite keen to try it out, as I'm looking to use it as the storage layer for my jobs (with a CSI interface).
