type            = "csi"
id              = "gitea"
name            = "gitea"
plugin_id       = "nfs"
access_mode     = "single-node-writer"
attachment_mode = "file-system"

context {
  server = "ip"
  share  = "/gitea"
}
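A volume definition like the one above is registered against the cluster with the Nomad CLI before any job can claim it. The file name here is a hypothetical placeholder, and server = "ip" above still needs a real NFS server address:

# Assuming the definition above is saved as volume.hcl (hypothetical name):
nomad volume register volume.hcl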
job "gitea" {
datacenters = [ "homelab-1" ]
type = "service"
group "gitea" {
count = 1
network {
port "http" {
static = "8666"
to = "3000"
}
}
volume "gitea" {
type = "csi"
read_only = false
source = "gitea"
}
task "gitea" {
driver = "docker"
volume_mount {
volume = "gitea"
destination = "/data/gitea"
read_only = false
}
config {
image = "gitea:local"
ports = ["http"]
}
service {
name = "gitea"
port = "http"
}
env {
}
}
}
}
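The job itself is submitted the usual way; gitea.hcl is again an assumed file name:

nomad job run gitea.hcl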
server {
  enabled          = true
  bootstrap_expect = 1
  encrypt          = ""
}

tls {}

acl {
  enabled = true
}

client {
  enabled = true

  options {
    "docker.privileged.enabled" = "true"
  }
}

disable_update_check = true
datacenter           = "homelab-1"
data_dir             = "/var/lib/nomad"
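A minimal sketch of starting the agent with this single-node server/client configuration, assuming it lives at /etc/nomad.d/nomad.hcl (hypothetical path):

nomad agent -config=/etc/nomad.d/nomad.hcl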
agent.plugin_loader: skipping external plugins since plugin_dir doesn't exist: plugin_dir=/var/lib/nomad/plugins
agent: detected plugin: name=exec type=driver plugin_version=0.1.0
agent: detected plugin: name=qemu type=driver plugin_version=0.1.0
agent: detected plugin: name=java type=driver plugin_version=0.1.0
agent: detected plugin: name=docker type=driver plugin_version=0.1.0
agent: detected plugin: name=raw_exec type=driver plugin_version=0.1.0
agent: detected plugin: name=nvidia-gpu type=device plugin_version=0.1.0
nomad.raft: restored from snapshot: id=4-16385-1610332689615
nomad.raft: initial configuration: index=1 servers="[{Suffrage:Voter ID:10.64.1.201:4647 Address:10.64.1.201:4647}]"
nomad.raft: entering follower state: follower="Node at 10.64.1.201:4647 [Follower]" leader=
nomad: serf: EventMemberJoin: blowhole.global 10.64.1.201
nomad: starting scheduling worker(s): num_workers=4 schedulers=[service, batch, system, _core]
client: using state directory: state_dir=/var/lib/nomad/client
nomad: serf: Failed to re-join any previously known node
nomad: adding server: server="blowhole.global (Addr: 10.64.1.201:4647) (DC: homelab-1)"
client: using alloc directory: alloc_dir=/var/lib/nomad/alloc
client.fingerprint_mgr.cgroup: cgroups are available
client.fingerprint_mgr.consul: consul agent is available
nomad.raft: heartbeat timeout reached, starting election: last-leader=
nomad.raft: entering candidate state: node="Node at 10.64.1.201:4647 [Candidate]" term=14
nomad.raft: election won: tally=1
nomad.raft: entering leader state: leader="Node at 10.64.1.201:4647 [Leader]"
nomad: cluster leadership acquired
nomad.fsm: deregistering job failed: job=plugin-nfs-nodes error="DeleteJob failed: deleting job from plugin: plugin mi…
nomad.fsm: deregistering job failed: job=plugin-nfs-controller error="DeleteJob failed: deleting job from plugin: plug…
nomad.fsm: deregistering job failed: job=plugin-nfs-controller error="DeleteJob failed: deleting job from plugin: plug…
nomad.fsm: deregistering job failed: job=plugin-nfs-controller error="DeleteJob failed: deleting job from plugin: plug…
nomad.fsm: deregistering job failed: job=plugin-nfs-nodes error="DeleteJob failed: deleting job from plugin: plugin mi…
nomad.fsm: CSIVolumeClaim failed: error=unschedulable
client.plugin: starting plugin manager: plugin-type=driver
client.plugin: starting plugin manager: plugin-type=device
client: node registration complete
client: node registration complete
client.alloc_runner.task_runner.task_hook.logmon.nomad: opening fifo: alloc_id=4cc4dfb4-14a3-8c7b-14b6-7c3146f69299 ta…
client.driver_mgr.docker: created container: driver=docker container_id=32d0ece05d8013abdef614894d325a1a1afc4364c7f21d…
client.driver_mgr.docker: started container: driver=docker container_id=32d0ece05d8013abdef614894d325a1a1afc4364c7f21d…
client: node registration complete
client.alloc_runner.task_runner.task_hook.logmon.nomad: opening fifo: alloc_id=68fe0b48-af68-eeca-0896-6765dce4e9a1 ta…
client.driver_mgr.docker: created container: driver=docker container_id=f51947fffcb0c3de724928d2ed30735908ec74b3f913c4…
client.driver_mgr.docker: started container: driver=docker container_id=f51947fffcb0c3de724928d2ed30735908ec74b3f913c4…
client: node registration complete
nomad.fsm: CSIVolumeRegister failed: error="volume exists: gitea"
http: request failed: method=PUT path=/v1/volume/csi/gitea error="volume exists: gitea" code=500
nomad.fsm: CSIVolumeDeregister failed: error="volume in use: gitea"
http: request failed: method=DELETE path=/v1/volume/csi/gitea?force=false error="volume in use: gitea" code=500
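These four errors fit together: the PUT fails because the volume is already in state, and the plain DELETE fails because an allocation still holds a claim on it. If the claim is known to be stale, one way out is to force-deregister and then re-register the volume; the -force flag mirrors the ?force=false query parameter in the failed DELETE above, and volume.hcl is the hypothetical file name from earlier:

nomad volume deregister -force gitea
nomad volume register volume.hcl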
job "plugin-nfs-controller" {
datacenters = [ "homelab-1" ]
group "controller" {
task "plugin" {
driver = "docker"
config {
image = "csi-driver-nfs:local"
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=contoller",
"--logtostderr",
"-v=5",
]
}
csi_plugin {
id = "nfs"
type = "controller"
mount_dir = "/csi"
}
resources {
cpu = 250
memory = 128
}
}
}
}
job "plugin-nfs-nodes" {
datacenters = [ "homelab-1" ]
type = "system"
group "nodes" {
task "plugin" {
driver = "docker"
config {
image = "csi-driver-nfs:local"
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=blowhole-0",
"--logtostderr",
"--v=5",
]
network_mode = "nfs"
ipv4_address = "172.69.0.10"
privileged = true
}
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
cpu = 250
memory = 128
}
}
}
}
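Once both plugin jobs are running, the plugin's controller and node health can be checked before registering volumes against it. The .hcl file names are assumed placeholders:

nomad job run plugin-nfs-controller.hcl
nomad job run plugin-nfs-nodes.hcl
nomad plugin status nfs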