-
-
Save leifg/4713995 to your computer and use it in GitHub Desktop.
# Vagrant 1.0-era Vagrantfile: create a 500 GiB VDI on the host and
# attach it to port 1 of the guest's SATA controller.
# (Table-extraction artifacts "| |" removed — they made this invalid Ruby.)
file_to_disk = './tmp/large_disk.vdi'

Vagrant::Config.run do |config|
  config.vm.box = 'base'
  # --size is in MiB, so 500 * 1024 == 500 GiB.
  config.vm.customize ['createhd', '--filename', file_to_disk, '--size', 500 * 1024]
  config.vm.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
end
Here is my solution; it works with VirtualBox 5.2.
The disk is stored together with the virtual machine.
# Monkey-patch the VirtualBox provider's SetName action so that, on first
# boot, a second 10 GiB VMDK disk is created inside the VM's own folder
# and attached to port 1 of the SATA controller.
class VagrantPlugins::ProviderVirtualBox::Action::SetName
  # Guard the alias: if this file is loaded twice, an unguarded
  # `alias_method :original_call, :call` would alias :call to the
  # already-patched version, making original_call -> call recurse
  # forever — the "stack level too deep" SystemStackError seen with
  # Vagrant 2.1.4.
  unless method_defined?(:original_call)
    alias_method :original_call, :call

    def call(env)
      machine = env[:machine]
      driver = machine.provider.driver
      # NOTE(review): reaches into the driver's internals; there is no
      # public accessor for the UUID on this driver object.
      uuid = driver.instance_eval { @uuid }
      ui = env[:ui]

      # Find the VM's folder from VBoxManage's machine-readable output:
      # the CfgFile line holds the full path of the .vbox file.
      vm_folder = ""
      vm_info = driver.execute("showvminfo", uuid, "--machinereadable")
      vm_info.split("\n").each do |line|
        next unless line.start_with?("CfgFile")
        vm_folder = File.expand_path("..", line.split("=")[1].gsub('"', ''))
        ui.info "VM Folder is: #{vm_folder}"
      end

      size = 10240 # MiB (10 GiB)
      disk_file = vm_folder + "/disk1.vmdk"
      ui.info "Adding disk to VM"
      if File.exist?(disk_file)
        ui.info "Disk already exists"
      else
        ui.info "Creating new disk"
        driver.execute("createmedium", "disk", "--filename", disk_file, "--size", "#{size}", "--format", "VMDK")
        ui.info "Attaching disk to VM"
        driver.execute('storageattach', uuid, '--storagectl', "SATA Controller", '--port', "1", '--type', 'hdd', '--medium', disk_file)
      end

      original_call(env)
    end
  end
end
Hi, could anyone tell me how I can use an existing hard disk on the host machine as the hard disk of a Vagrant VM?
When Vagrant spins up a VM, I want that VM to use a hard disk that is attached to my host machine — say, one of 1 TB.
I also want to bring up a second VM using another hard disk attached to my host — say, one of 100 GB. How can I do this in the Vagrantfile?
Adding this to your Vagrantfile might be enough to meet your needs more thoroughly, but it will not handle the formatting phase the way the plugin can (which may be ideal). The reason I wrote this is that things went haywire for me when I ran certain Vagrant operations after the controller already existed and it tried to create it again.
# True when the named storage controller already exists on the VM.
# The VM name is now a parameter (defaulting to the original hard-coded
# value) so the helper works for other VMs too — backward-compatible.
def sata_controller_exists?(controller_name = "SATA Controller", vm_name = "storage-host-vm-dev")
  `vboxmanage showvminfo #{vm_name} | grep " #{controller_name}" | wc -l`.to_i == 1
end
# True when the given port on the named controller already has a medium
# attached (VBoxManage prints attachments as 'Name (port, device): ...').
# Fix: the original ignored controller_name and hard-coded "SATA Controller".
# The VM name is parameterized with the original value as default.
def port_in_use?(controller_name, port, vm_name = "storage-host-vm-dev")
  `vboxmanage showvminfo #{vm_name} | grep "#{controller_name} (#{port}, " | wc -l`.to_i == 1
end
# Attach hdd_path to the given controller/port of the VM unless that
# port is already occupied.
# Fix: the original ignored controller_name and hard-coded
# 'SATA Controller' in the storageattach call.
def attach_hdd(v, controller_name, port, hdd_path)
  return if port_in_use?(controller_name, port)
  v.customize ['storageattach', :id, '--storagectl', controller_name, '--port', port, '--device', 0, '--type', 'hdd', '--medium', hdd_path]
end
.
.
.
# Note that I have a multi-VM Vagrantfile so run this based on which VM is being iterated over...
# NOTE(review): `vm` (a per-VM settings hash) and `v` (the VirtualBox
# provider object) come from an enclosing loop not shown here.
if vm[:name] == 'storage-host-vm'
controller_name = 'SATA Controller'
# Create the SATA controller only once; re-creating an existing one fails.
v.customize ['storagectl', :id, '--name', controller_name, '--add', 'sata', '--portcount', 4] unless sata_controller_exists?(controller_name)
file_to_disk = "./packer_cache/#{vm[:name]}_vault_1.vdi"
# --size is in MiB, so 1 * 1024 == 1 GiB; skip if the disk file exists.
v.customize ['createhd', '--filename', file_to_disk, '--size', 1 * 1024] unless File.exist?(file_to_disk)
attach_hdd(v, controller_name, 0, file_to_disk)
end
.
.
.
In Vagrant 2.1.4, I'm getting:
`instance_eval': stack level too deep (SystemStackError)
It's probably because of the
uuid = driver.instance_eval { @uuid }
Any idea how to fix it?
Trying to run 3 Ubuntu nodes with 3 disks in vagrant.
Has anyone managed to do this?
Below is a WORKING vagrantfile with 3 centos7 nodes with 3 disks.
But it DOESN'T WORK for ubuntu.
# Provisioner: partition, format, and persistently mount /dev/sdb1 on
# /mnt/data1. Made idempotent:
#  - partition/format only when /dev/sdb1 does not yet exist (the original
#    repartitioned and re-ran mkfs.xfs on every provision — data loss);
#  - `grep -Fxq "sdb1"` could never match the appended fstab line (which
#    starts with UUID=...), so duplicates piled up — grep for the mount
#    point instead;
#  - mkdir -p so re-provisioning does not fail on an existing directory;
#  - parted -s for non-interactive script mode.
$sdb1 = <<-SCRIPT
if [ ! -b /dev/sdb1 ]; then
  parted -s /dev/sdb mklabel msdos
  parted -s /dev/sdb mkpart primary 0% 100%
  mkfs.xfs /dev/sdb1
fi
mkdir -p /mnt/data1
if grep -q "/mnt/data1" /etc/fstab
then
echo 'sdb1 exist in fstab'
else
echo `blkid /dev/sdb1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data1 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data1 > /dev/null; then
echo "/dev/sdb1 mounted /mnt/data1"
umount /mnt/data1
mount /mnt/data1
else
mount /mnt/data1
fi
SCRIPT
# Provisioner: partition, format, and persistently mount /dev/sdc1 on
# /mnt/data2. Same idempotency fixes as $sdb1: guard partitioning on the
# partition's existence, grep fstab for the mount point (the -Fx match on
# "sdc1" could never hit the UUID=... line), mkdir -p, parted -s.
$sdc1 = <<-SCRIPT
if [ ! -b /dev/sdc1 ]; then
  parted -s /dev/sdc mklabel msdos
  parted -s /dev/sdc mkpart primary 0% 100%
  mkfs.xfs /dev/sdc1
fi
mkdir -p /mnt/data2
if grep -q "/mnt/data2" /etc/fstab
then
echo 'sdc1 exist in fstab'
else
echo `blkid /dev/sdc1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data2 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data2 > /dev/null; then
echo "/dev/sdc1 mounted /mnt/data2"
umount /mnt/data2
mount /mnt/data2
else
mount /mnt/data2
fi
SCRIPT
# Provisioner: partition, format, and persistently mount /dev/sdd1 on
# /mnt/metadata1. Same idempotency fixes as $sdb1/$sdc1.
$sdd1 = <<-SCRIPT
if [ ! -b /dev/sdd1 ]; then
  parted -s /dev/sdd mklabel msdos
  parted -s /dev/sdd mkpart primary 0% 100%
  mkfs.xfs /dev/sdd1
fi
mkdir -p /mnt/metadata1
if grep -q "/mnt/metadata1" /etc/fstab
then
echo 'sdd1 exist in fstab'
else
echo `blkid /dev/sdd1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/metadata1 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/metadata1 > /dev/null; then
echo "/dev/sdd1 mounted /mnt/metadata1"
umount /mnt/metadata1
mount /mnt/metadata1
else
mount /mnt/metadata1
fi
SCRIPT
# Host-side paths for node1's three extra virtual disks, and its
# private-network IP address.
node1disk1 = "./tmp/node1disk1.vdi"
node1disk2 = "./tmp/node1disk2.vdi"
node1disk3 = "./tmp/node1disk3.vdi"
ip_node1 = "192.168.33.31"
# One CentOS 7 VM ("node1") with three extra fixed-size 1 GiB disks on the
# IDE controller, then provisioned with the $sdb1/$sdc1/$sdd1 scripts to
# partition, format, and mount them.
Vagrant.configure("2") do |config|
  config.vm.define "node1" do |node1|
    node1.vm.network "private_network", ip: ip_node1
    node1.vm.hostname = "node1"
    # NOTE(review): this nested define is redundant — the VM is already
    # defined by the enclosing `config.vm.define "node1"`.
    node1.vm.define "node1"
    node1.vm.box_download_insecure = true
    node1.vm.box = "centos/7"
    node1.vm.provider "virtualbox" do |vb|
      vb.memory = "2048"
      # File.exists? was removed in Ruby 3.2 — use File.exist?.
      # Create + attach each disk only on first `vagrant up`; afterwards
      # the attachment persists in the VM definition.
      unless File.exist?(node1disk1)
        vb.customize ['createhd', '--filename', node1disk1, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 0, '--device', 1, '--type', 'hdd', '--medium', node1disk1]
      end
      unless File.exist?(node1disk2)
        vb.customize ['createhd', '--filename', node1disk2, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', node1disk2]
      end
      unless File.exist?(node1disk3)
        vb.customize ['createhd', '--filename', node1disk3, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 1, '--type', 'hdd', '--medium', node1disk3]
      end
    end
    node1.vm.provision "shell", inline: $sdb1
    node1.vm.provision "shell", inline: $sdc1
    node1.vm.provision "shell", inline: $sdd1
  end
end
In the process of experimenting I arrived at the Vagrantfile below,
but the system cannot boot.
# Provisioner: partition, format, and persistently mount /dev/sdb1 on
# /mnt/data1. Made idempotent: partition/format only when /dev/sdb1 does
# not yet exist (the original reformatted on every provision), grep fstab
# for the mount point (the -Fx match on "sdb1" could never hit the
# UUID=... line, so duplicates accumulated), mkdir -p, parted -s.
$sdb1 = <<-SCRIPT
if [ ! -b /dev/sdb1 ]; then
  parted -s /dev/sdb mklabel msdos
  parted -s /dev/sdb mkpart primary 0% 100%
  mkfs.xfs /dev/sdb1
fi
mkdir -p /mnt/data1
if grep -q "/mnt/data1" /etc/fstab
then
echo 'sdb1 exist in fstab'
else
echo `blkid /dev/sdb1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data1 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data1 > /dev/null; then
echo "/dev/sdb1 mounted /mnt/data1"
umount /mnt/data1
mount /mnt/data1
else
mount /mnt/data1
fi
SCRIPT
# Provisioner: partition, format, and persistently mount /dev/sdc1 on
# /mnt/data2. Same idempotency fixes as $sdb1.
$sdc1 = <<-SCRIPT
if [ ! -b /dev/sdc1 ]; then
  parted -s /dev/sdc mklabel msdos
  parted -s /dev/sdc mkpart primary 0% 100%
  mkfs.xfs /dev/sdc1
fi
mkdir -p /mnt/data2
if grep -q "/mnt/data2" /etc/fstab
then
echo 'sdc1 exist in fstab'
else
echo `blkid /dev/sdc1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data2 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data2 > /dev/null; then
echo "/dev/sdc1 mounted /mnt/data2"
umount /mnt/data2
mount /mnt/data2
else
mount /mnt/data2
fi
SCRIPT
# Provisioner: partition, format, and persistently mount /dev/sdd1 on
# /mnt/metadata1. Same idempotency fixes as $sdb1/$sdc1.
$sdd1 = <<-SCRIPT
if [ ! -b /dev/sdd1 ]; then
  parted -s /dev/sdd mklabel msdos
  parted -s /dev/sdd mkpart primary 0% 100%
  mkfs.xfs /dev/sdd1
fi
mkdir -p /mnt/metadata1
if grep -q "/mnt/metadata1" /etc/fstab
then
echo 'sdd1 exist in fstab'
else
echo `blkid /dev/sdd1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/metadata1 xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/metadata1 > /dev/null; then
echo "/dev/sdd1 mounted /mnt/metadata1"
umount /mnt/metadata1
mount /mnt/metadata1
else
mount /mnt/metadata1
fi
SCRIPT
# Host-side paths for node1's three extra virtual disks, and its
# private-network IP address.
node1disk1 = "./tmp/node1disk1.vdi"
node1disk2 = "./tmp/node1disk2.vdi"
node1disk3 = "./tmp/node1disk3.vdi"
ip_node1 = "192.168.33.31"
# One Ubuntu 18.04 VM ("node1") with three extra fixed-size 1 GiB disks.
# The stock ubuntu/bionic64 box ships only a SCSI controller, so an IDE
# controller is removed and re-added here before the disks are attached.
Vagrant.configure("2") do |config|
  config.vm.define "node1" do |node1|
    node1.vm.network "private_network", ip: ip_node1
    node1.vm.hostname = "node1"
    # NOTE(review): this nested define is redundant — the VM is already
    # defined by the enclosing `config.vm.define "node1"`.
    node1.vm.define "node1"
    node1.vm.box_download_insecure = true
    node1.vm.box = "ubuntu/bionic64"
    node1.vm.provider "virtualbox" do |vb|
      vb.gui = true
      vb.memory = "1024"
      # Drop and recreate the IDE controller so the attaches below have
      # a controller named "IDE" to target. NOTE(review): the --remove
      # will fail once media are attached to it — presumably the cause
      # of trouble on repeated `vagrant up`; verify on a second run.
      vb.customize ["storagectl", :id, "--name", "IDE", "--remove"]
      vb.customize ["storagectl", :id, "--name", "IDE", "--add", "ide", "--controller", "ICH6"]
      # File.exists? was removed in Ruby 3.2 — use File.exist?.
      unless File.exist?(node1disk1)
        vb.customize ['createhd', '--filename', node1disk1, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 0, '--device', 1, '--type', 'hdd', '--medium', node1disk1]
      end
      unless File.exist?(node1disk2)
        vb.customize ['createhd', '--filename', node1disk2, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', node1disk2]
      end
      unless File.exist?(node1disk3)
        vb.customize ['createhd', '--filename', node1disk3, '--variant', 'Fixed', '--size', 1 * 1024]
        vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 1, '--type', 'hdd', '--medium', node1disk3]
      end
    end
    node1.vm.provision "shell", inline: $sdb1
    node1.vm.provision "shell", inline: $sdc1
    node1.vm.provision "shell", inline: $sdd1
  end
end
VBoxManage showvminfo says:
Storage Controller Name (0): SCSI
Storage Controller Type (0): LsiLogic
Storage Controller Instance Number (0): 0
Storage Controller Max Port Count (0): 16
Storage Controller Port Count (0): 16
Storage Controller Bootable (0): on
Storage Controller Name (1): IDE
Storage Controller Type (1): ICH6
Storage Controller Instance Number (1): 0
Storage Controller Max Port Count (1): 2
Storage Controller Port Count (1): 2
Storage Controller Bootable (1): on
SCSI (0, 0): /home/user/VirtualBox VMs/vagrant-openio-multi-nodes_node1_1565541256124_28246/ubuntu-bionic-18.04-cloudimg.vmdk (UUID: 9b9b05cc-d359-428e-a4c5-91391eb7e0e3)
SCSI (1, 0): /home/user/VirtualBox VMs/vagrant-openio-multi-nodes_node1_1565541256124_28246/ubuntu-bionic-18.04-cloudimg-configdrive.vmdk (UUID: 5e47924d-2ad2-4096-9a58-7b97d2ffcbd8)
IDE (0, 1): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk1.vdi (UUID: d2ef2936-f296-483c-9336-04b5bbd417e9)
IDE (1, 0): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk2.vdi (UUID: 2673732a-edf3-48f2-8ecb-50af82b1d2e5)
IDE (1, 1): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk3.vdi (UUID: f2243189-ebba-496a-aab8-cb97f68b4038)
Here is what worked for me