Note that since this is only a setup for local testing, there is no redundancy or authentication configuration.
- The Vagrantfile for a two-node setup:
# -*- mode: ruby -*-
# vi: set ft=ruby :

hosts = {
  "gluster1" => {
    :ip => "10.8.0.20",
    :cpus => 1,
    :memory => 512
  },
  "gluster2" => {
    :ip => "10.8.0.21",
    :cpus => 1,
    :memory => 512
  }
}

Vagrant.configure("2") do |config|
  # always use Vagrant's insecure key
  config.ssh.insert_key = false
  # forward ssh agent to easily ssh into the different machines
  config.ssh.forward_agent = true

  config.vm.box = "bento/fedora-28"

  hosts.each do |name, m|
    config.vm.define name do |machine|
      machine.vm.hostname = name
      machine.vm.network :private_network, ip: m[:ip]
      machine.vm.provider "virtualbox" do |v|
        v.name = name
        v.cpus = m[:cpus]
        v.memory = m[:memory]
        # VirtualBox provider options, so they belong inside this block
        v.check_guest_additions = false
        v.functional_vboxsf = false
      end
    end
  end
end
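With the Vagrantfile in place, the machines can be brought up and given a quick sanity check over the private network:

vagrant up
vagrant status
# ssh into a node and verify it can reach the other one
vagrant ssh gluster1 -- ping -c1 10.8.0.21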
- Follow https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/ to set up the GlusterFS cluster, but leave /dev/sdb as a raw device on both nodes so heketi can manage it; a condensed sketch of the steps follows
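Roughly, condensed from the quickstart (package and service names as shipped on Fedora 28; /dev/sdb stays untouched):

# on both nodes
dnf install -y glusterfs-server
systemctl enable --now glusterd

# on gluster1 only: form the trusted storage pool
gluster peer probe 10.8.0.21
gluster peer status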
- Run
dnf install heketi heketi-client
to install heketi and heketi-client
- Configure the heketi API server (typically /etc/heketi/heketi.json); generating the SSH key it references and starting the service are sketched after the file
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "My Secret"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "My Secret"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh, kubernetes",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/ssh.key",
      "user": "vagrant",
      "port": "22",
      "sudo": true,
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
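The sshexec executor above needs the key at /etc/heketi/ssh.key to log in as vagrant on both nodes. A rough sketch, assuming the stock package ships a heketi systemd service and runs as the heketi user:

# generate the key referenced in sshexec and allow it to log in on both nodes
ssh-keygen -t rsa -N '' -f /etc/heketi/ssh.key
chown heketi:heketi /etc/heketi/ssh.key*
ssh-copy-id -i /etc/heketi/ssh.key.pub vagrant@10.8.0.20
ssh-copy-id -i /etc/heketi/ssh.key.pub vagrant@10.8.0.21

# start heketi and check that the API answers
systemctl enable --now heketi
curl http://10.8.0.20:8080/hello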
- Follow https://github.com/heketi/heketi/blob/master/docs/admin/topology.md to load the topology; the topology file and the heketi-cli commands to load it are shown below
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["10.8.0.20"],
              "storage": ["10.8.0.20"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["10.8.0.21"],
              "storage": ["10.8.0.21"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        }
      ]
    }
  ]
}
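With the topology saved to a file (topology.json is just an assumed name), it can be loaded and the cluster id needed by the StorageClass below retrieved via heketi-cli:

export HEKETI_CLI_SERVER=http://10.8.0.20:8080
heketi-cli topology load --json=topology.json

# the id printed here goes into the StorageClass clusterid field
heketi-cli cluster list
heketi-cli topology info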
- StorageClass in Kubernetes
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.8.0.20:8080"
  clusterid: "9da032224088de8fb9c71ed129739dbe"
  restauthenabled: "false"
  volumetype: "none"
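Applying and verifying the class (storageclass.yaml is just an assumed file name; the patch line is only needed if the class should also act as the cluster default):

kubectl apply -f storageclass.yaml
kubectl get storageclass default
# optional: mark it as the cluster's default StorageClass
kubectl patch storageclass default -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'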
- Test PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster1
  annotations:
    volume.beta.kubernetes.io/storage-class: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
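Creating the claim and checking that heketi provisions a volume for it (pvc.yaml is again an assumed file name):

kubectl apply -f pvc.yaml
# should reach STATUS Bound once the gluster volume has been created
kubectl get pvc gluster1
kubectl get pv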
- In order for Kubernetes pods to mount the PVC, the GlusterFS client packages need to be installed on the nodes where the pods run, for example:
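Package names differ between distributions:

# Fedora / CentOS / RHEL
dnf install -y glusterfs-fuse

# Debian / Ubuntu
apt-get install -y glusterfs-client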