Setting up GlusterFS on Vagrant for Kubernetes


Note that since this is only a setup for local testing, no redundancy or authentication is configured.

  • The Vagrantfile for a two-node set-up (see the note after it about attaching the brick disk)
# -*- mode: ruby -*-
# vi: set ft=ruby :
hosts = {
  "gluster1" => {
    :ip => "10.8.0.20",
    :cpus => 1,
    :memory => 512
  },

  "gluster2" => {
    :ip => "10.8.0.21",
    :cpus => 1,
    :memory => 512
  }
}

Vagrant.configure("2") do |config|
  # always use Vagrant's insecure key
  config.ssh.insert_key = false
  # forward ssh agent to easily ssh into the different machines
  config.ssh.forward_agent = true

  config.vm.box = "bento/fedora-28"
  hosts.each do |name, m|
    config.vm.define name do |machine|
      machine.vm.hostname = name
      machine.vm.network :private_network, ip: m[:ip]
      machine.vm.provider "virtualbox" do |v|
        v.name = name
        v.cpus = m[:cpus]
        v.memory = m[:memory]
      end
    end
  end
end
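The topology file further down expects an empty block device (/dev/sdb) on each VM to serve as the GlusterFS brick, which the Vagrantfile above does not create. One way to attach an extra disk is to extend the virtualbox provider section inside the config.vm.define block; this is only a sketch, and the storage controller name ("SATA Controller") depends on the base box:

      machine.vm.provider "virtualbox" do |v|
        v.name   = name
        v.cpus   = m[:cpus]
        v.memory = m[:memory]
        # Create a second 10 GB disk once per VM; it shows up as /dev/sdb in the guest.
        brick_disk = "#{name}-brick.vdi"
        unless File.exist?(brick_disk)
          v.customize ["createhd", "--filename", brick_disk, "--size", 10 * 1024]
        end
        # Attach it to a free port on the box's storage controller.
        v.customize ["storageattach", :id,
                     "--storagectl", "SATA Controller",
                     "--port", 1, "--device", 0,
                     "--type", "hdd", "--medium", brick_disk]
      end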
  • Heketi server configuration (heketi.json)
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,
  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "My Secret"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "My Secret"
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",
    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/ssh.key",
      "user": "vagrant",
      "port": "22",
      "sudo": true,
      "fstab": "/etc/fstab"
    },
    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node.  Default is /etc/fstab"
    },
    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",
    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
  • Heketi topology file (topology.json)
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["10.8.0.20"],
              "storage": ["10.8.0.20"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["10.8.0.21"],
              "storage": ["10.8.0.21"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        }
      ]
    }
  ]
}
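Once heketi is running with the configuration above (for example via heketi --config=/etc/heketi/heketi.json), the topology can be loaded and the cluster id for the StorageClass below retrieved with heketi-cli. A rough sketch, assuming heketi listens on the first node and the file is saved as topology.json:

export HEKETI_CLI_SERVER=http://10.8.0.20:8080
heketi-cli topology load --json=topology.json
heketi-cli cluster list    # prints the cluster id used as "clusterid" in the StorageClass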
  • StorageClass in Kubernetes
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.8.0.20:8080"
  clusterid: "9da032224088de8fb9c71ed129739dbe"
  restauthenabled: "false"
  volumetype: "none"
  • Test PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster1
  annotations:
    volume.beta.kubernetes.io/storage-class: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  • In order for Kubernetes pods to mount the PVC, glusterfs-client needs to be installed on the nodes where the pods run.
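To verify that dynamic provisioning and mounting work end to end, a throwaway pod can mount the claim above. A minimal sketch (pod name, image and mount path are arbitrary):

apiVersion: v1
kind: Pod
metadata:
  name: gluster-test
spec:
  containers:
    - name: shell
      image: busybox
      command: ["sh", "-c", "echo hello > /data/hello && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: gluster1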