@liemle3893
Created April 7, 2020 06:58
job "minio" {
  datacenters = ["dc1"]
  type        = "service"

  group "minio1" {
    ephemeral_disk {
      size    = 10000
      sticky  = true
      migrate = false
    }

    task "minio1" {
      driver = "docker"

      env {
        MINIO_ACCESS_KEY = "minio"
        MINIO_SECRET_KEY = "minio123"
      }

      user = "root"

      config {
        image   = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
        command = "server"
        args = [
          "http://minio{1...4}.service.consul:9000/data{1...2}"
        ]
        dns_servers = ["${attr.unique.network.ip-address}"]

        port_map {
          http = 9000
        }

        privileged = true

        mounts = [
          # sample volume mount
          {
            type     = "volume"
            target   = "/data1"
            source   = "data1-1"
            readonly = false
          },
          {
            type     = "volume"
            target   = "/data2"
            source   = "data1-2"
            readonly = false
          },
        ]
      }

      service {
        name = "minio1"
        port = "http"

        check {
          name           = "alive"
          type           = "http"
          port           = "http"
          path           = "/minio/health/live"
          interval       = "30s"
          timeout        = "20s"
          initial_status = "passing"

          check_restart {
            limit           = 3
            grace           = "90s"
            ignore_warnings = false
          }
        }
      }

      resources {
        network {
          port "http" {
            static = 9000
          }
        }

        cpu    = 20
        memory = 100
      }
    }
  }

  group "minio2" {
    ephemeral_disk {
      size    = 10000
      sticky  = true
      migrate = false
    }

    task "minio2" {
      driver = "docker"

      env {
        MINIO_ACCESS_KEY = "minio"
        MINIO_SECRET_KEY = "minio123"
      }

      user = "root"

      config {
        image   = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
        command = "server"
        args = [
          "http://minio{1...4}.service.consul:9000/data{1...2}"
        ]
        dns_servers = ["${attr.unique.network.ip-address}"]

        port_map {
          http = 9000
        }

        privileged = true

        mounts = [
          # sample volume mount
          {
            type     = "volume"
            target   = "/data1"
            source   = "data2-1"
            readonly = false
          },
          {
            type     = "volume"
            target   = "/data2"
            source   = "data2-2"
            readonly = false
          },
        ]
      }

      service {
        name = "minio2"
        port = "http"

        check {
          name           = "alive"
          type           = "http"
          port           = "http"
          path           = "/minio/health/live"
          interval       = "30s"
          timeout        = "20s"
          initial_status = "passing"

          check_restart {
            limit           = 3
            grace           = "90s"
            ignore_warnings = false
          }
        }
      }

      resources {
        network {
          port "http" {
            static = 9000
          }
        }

        cpu    = 20
        memory = 100
      }
    }
  }

  group "minio3" {
    ephemeral_disk {
      size    = 10000
      sticky  = true
      migrate = false
    }

    task "minio3" {
      driver = "docker"

      env {
        MINIO_ACCESS_KEY = "minio"
        MINIO_SECRET_KEY = "minio123"
      }

      user = "root"

      config {
        image   = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
        command = "server"
        args = [
          "http://minio{1...4}.service.consul:9000/data{1...2}"
        ]
        dns_servers = ["${attr.unique.network.ip-address}"]

        port_map {
          http = 9000
        }

        privileged = true

        mounts = [
          # sample volume mount
          {
            type     = "volume"
            target   = "/data1"
            source   = "data3-1"
            readonly = false
          },
          {
            type     = "volume"
            target   = "/data2"
            source   = "data3-2"
            readonly = false
          },
        ]
      }

      service {
        name = "minio3"
        port = "http"

        check {
          name           = "alive"
          type           = "http"
          port           = "http"
          path           = "/minio/health/live"
          interval       = "30s"
          timeout        = "20s"
          initial_status = "passing"

          check_restart {
            limit           = 3
            grace           = "90s"
            ignore_warnings = false
          }
        }
      }

      resources {
        network {
          port "http" {
            static = 9000
          }
        }

        cpu    = 20
        memory = 100
      }
    }
  }

  group "minio4" {
    ephemeral_disk {
      size    = 10000
      sticky  = true
      migrate = false
    }

    task "minio4" {
      driver = "docker"

      env {
        MINIO_ACCESS_KEY = "minio"
        MINIO_SECRET_KEY = "minio123"
      }

      user = "root"

      config {
        image   = "minio/minio:RELEASE.2020-04-04T05-39-31Z"
        command = "server"
        args = [
          "http://minio{1...4}.service.consul:9000/data{1...2}"
        ]
        dns_servers = ["${attr.unique.network.ip-address}"]

        port_map {
          http = 9000
        }

        privileged = true

        mounts = [
          # sample volume mount
          {
            type     = "volume"
            target   = "/data1"
            source   = "data4-1"
            readonly = false
          },
          {
            type     = "volume"
            target   = "/data2"
            source   = "data4-2"
            readonly = false
          },
        ]
      }

      service {
        name = "minio4"
        port = "http"

        check {
          name           = "alive"
          type           = "http"
          port           = "http"
          path           = "/minio/health/live"
          interval       = "30s"
          timeout        = "20s"
          initial_status = "passing"

          check_restart {
            limit           = 3
            grace           = "90s"
            ignore_warnings = false
          }
        }
      }

      resources {
        network {
          port "http" {
            static = 9000
          }
        }

        cpu    = 20
        memory = 100
      }
    }
  }
}
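
A minimal way to deploy and spot-check this job, assuming the spec above is saved as minio.nomad and the Nomad CLI points at a cluster whose Consul agent serves DNS on its default port 8600 (the file name and addresses are illustrative, not from the gist):

# Submit the job and confirm all four groups are placed
nomad job run minio.nomad
nomad job status minio

# Each group registers a Consul service (minio1..minio4) that the server args rely on;
# resolution can be checked against the local Consul agent
dig @127.0.0.1 -p 8600 minio1.service.consul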
langerma commented Dec 2, 2021

@liemle3893 Do you run SeaweedFS as Nomad jobs, or did you deploy it separately? In the first case, would you mind sharing your job file? :-) I'd be quite keen to try it out, as I'm looking to use it as a storage layer for my jobs (with the CSI interface).
