#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Copyright (C) 2015-:
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
pyvmomi.wrapper
~~~~~~~~~~~~~~~
A VMware pyVmomi helper and wrapper for general and simple
VMware-related manipulations
"""
import logging
import time
from pprintpp import pprint
from pyVim import connect
import pyVmomi
from pyVmomi import vmodl, vim
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("test_pyvmomi")
class PyVmomiHelper:
##Moved to driver
@classmethod
def create_nic_from_type_name(cls, nic_type_name):
        if str(nic_type_name).lower() == 'virtualvmxnet':
            return vim.vm.device.VirtualVmxnet()
        elif str(nic_type_name).lower() == 'virtualvmxnet2':
            return vim.vm.device.VirtualVmxnet2()
        elif str(nic_type_name).lower() == 'virtualvmxnet3':
            return vim.vm.device.VirtualVmxnet3()
        else:
            raise ValueError("Unsupported nic type name: {0}".format(nic_type_name))
@classmethod
def find_mac_address(cls, vm):
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualEthernetCard):
return device.macAddress
return None
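    # Note: find_mac_address returns the MAC of the first virtual NIC only; a VM
    # with several adapters would need the caller to collect every
    # VirtualEthernetCard instead.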
@classmethod
def find_cluster(cls, vcenter_service, name):
content = vcenter_service.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.ClusterComputeResource], True)
cluster_list = obj_view.view
obj_view.Destroy()
for cluster in cluster_list:
if cluster.name == name:
return cluster
return None
@classmethod
def find_folder(cls, vcenter_service, name):
content = vcenter_service.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Folder], True)
folder_list = obj_view.view
obj_view.Destroy()
for folder in folder_list:
if folder.name == name:
return folder
return None
@classmethod
def find_vm_by_name(cls, service_instance, name):
""" Based on the vm_name, returns a VM instance """
virtual_host_name = name
        vms_data = cls.collect_properties(
            service_instance,
            view_ref=cls.get_container_view(service_instance,
                                            obj_type=[vim.VirtualMachine]),
            obj_type=vim.VirtualMachine,
            path_set=["name", "config.uuid"],
            include_mors=True
        )
        for vm_props in vms_data:
if vm_props["name"] == virtual_host_name:
return service_instance.content.searchIndex.FindByUuid(None, vm_props["config.uuid"], True)
return None
@classmethod
def find_host_by_name(cls, vcenter_service, virtual_host_context):
"""
Based on the name return a HostSystem instance
:param vcenter_service:
:param name:
:return:
"""
host_name = virtual_host_context.virtual_host.vmware.esx
        vms_data = cls.collect_properties(
            vcenter_service,
            view_ref=cls.get_container_view(vcenter_service,
                                            obj_type=[vim.HostSystem]),
            obj_type=vim.HostSystem,
            path_set=["name"],
            include_mors=True
        )
pprint(vms_data)
        for vm_props in vms_data:
if vm_props["name"] == host_name:
return vcenter_service.content.searchIndex.FindByIp(None, host_name, False)
return None
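    # Note: the final lookup above goes through searchIndex.FindByIp, so the esx
    # value carried by the virtual host context is expected to be the host's IP
    # address rather than its DNS name.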
@classmethod
def _get_obj(cls, content, vimtype, name):
"""
Get the vsphere object associated with a given text name
"""
obj = None
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
@classmethod
def _get_all_objs(cls, content, vimtype):
"""
Get all the vsphere objects associated with a given type
"""
obj = {}
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
for c in container.view:
obj.update({c: c.name})
return obj
@classmethod
def get_host_by_name(cls, si, name):
"""
Find a virtual machine by it's name and return it
"""
return cls._get_obj(si.RetrieveContent(), [vim.HostSystem], name)
### ----------------------------------------------------------------------
# <community-samples>
### ----------------------------------------------------------------------
@classmethod
def collect_properties(cls, service_instance, view_ref, obj_type, path_set=None, include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Args:
si (ServiceInstance): ServiceInstance connection
view_ref (pyVmomi.vim.view.*): Starting point of inventory navigation
obj_type (pyVmomi.vim.*): Type of managed object
path_set (list): List of properties to retrieve
include_mors (bool): If True include the managed objects
refs in the result
Returns:
A list of properties for the managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = "traverseEntities"
traversal_spec.path = "view"
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
        # Identify the properties to be retrieved
property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties["obj"] = obj.obj
data.append(properties)
return data
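    # Example usage, as a sketch (the property paths are standard vSphere ones):
    #
    #   view = PyVmomiHelper.get_container_view(si, obj_type=[vim.VirtualMachine])
    #   vms = PyVmomiHelper.collect_properties(si, view_ref=view,
    #                                          obj_type=vim.VirtualMachine,
    #                                          path_set=["name", "runtime.powerState"],
    #                                          include_mors=True)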
@classmethod
def get_container_view(cls, service_instance, obj_type, container=None):
"""
Get a vSphere Container View reference to all objects of type "obj_type"
It is up to the caller to take care of destroying the View when no longer
needed.
Args:
obj_type (list): A list of managed object types
Returns:
A container view ref to the discovered managed objects
"""
if not container:
container = service_instance.content.rootFolder
view_ref = service_instance.content.viewManager.CreateContainerView(
container=container,
type=obj_type,
recursive=True
)
return view_ref
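    # Example, as a sketch: enumerate datastore names, then release the view.
    #
    #   view = PyVmomiHelper.get_container_view(si, obj_type=[vim.Datastore])
    #   names = [ds.name for ds in view.view]
    #   view.Destroy()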
### ----------------------------------------------------------------------
# </community-samples>
### ----------------------------------------------------------------------
@classmethod
def WaitTask(cls, task):
""" Wait for the given task to be completed and returns result when done """
while task.info.state == vim.TaskInfo.State.running:
time.sleep(1)
return task.info.result
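    # Device-key wiring used by the builders below (these are the values this
    # script hard-codes, not requirements imposed by vSphere):
    #   100  -> virtual PCI controller, holding the network adapter (key 4000)
    #   1000 -> ParaVirtual SCSI controller, holding the first disk (key 2000)
    #   201  -> IDE controller the CD-ROM drive attaches to
    #   3002 -> CD-ROM device key assumed by mount_iso / unmount_iso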
@classmethod
def create_hdd_controller(cls):
#
# Scsi controller instantiation and operation setup
#
hdd_controller_specs = vim.vm.device.VirtualDeviceSpec()
hdd_controller_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
hdd_controller_specs.device = vim.vm.device.ParaVirtualSCSIController()
hdd_controller_specs.device.key = 1000
hdd_controller_specs.device.controllerKey = 100
hdd_controller_specs.device.device = [2000]
#
# Device construction
# hdd_controller_specs.device.device: list of devices held by controller
#
hdd_controller_specs.device.sharedBus = "noSharing"
return hdd_controller_specs
@classmethod
def create_hdd(cls, disk_size_kb):
hdd_specs = vim.vm.device.VirtualDeviceSpec()
hdd_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
hdd_specs.fileOperation = "create"
hdd_specs.device = vim.vm.device.VirtualDisk()
hdd_specs.device.key = 2000
#
# Hdd basic specs
#
hdd_specs.device.capacityInKB = disk_size_kb
hdd_specs.device.controllerKey = 1000
hdd_specs.device.unitNumber = 0
#
# sharing specs
#
hdd_specs.device.shares = vim.SharesInfo()
hdd_specs.device.shares.shares = 1000
hdd_specs.device.shares.level = "normal"
hdd_backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
hdd_backing.diskMode = vim.vm.device.VirtualDiskOption.DiskMode.persistent
hdd_specs.device.backing = hdd_backing
return hdd_specs
@classmethod
def create_network_adapter(cls, nic_type, network):
network_adapter_specs = vim.vm.device.VirtualDeviceSpec()
network_adapter_specs.device = cls.create_nic_from_type_name(nic_type)
network_adapter_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
network_adapter_specs.device.key = 4000
network_adapter_specs.device.controllerKey = 100
network_adapter_backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
network_adapter_backing.inPassthroughMode = False
network_adapter_backing.deviceName = network
network_adapter_specs.device.backing = network_adapter_backing
network_adapter_specs.device.addressType = "assigned"
network_adapter_specs.device.wakeOnLanEnabled = True
return network_adapter_specs
@classmethod
def create_pci_controller(cls):
""" Simple pci controller configuration used for cdrom """
pci_controller_specs = vim.vm.device.VirtualDeviceSpec()
pci_controller_specs.device = vim.vm.device.VirtualPCIController()
pci_controller_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
pci_controller_specs.device.key = 100
pci_controller_specs.device.device = [4000]
pci_controller_specs.device.busNumber = 0
return pci_controller_specs
@classmethod
def create_cdrom_drive(cls):
""" Basic cdrom drive config """
virtual_cdrom_specs = vim.vm.device.VirtualDeviceSpec()
virtual_cdrom_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
virtual_cdrom_specs.device = vim.vm.device.VirtualCdrom()
virtual_cdrom_specs.device.controllerKey = 201
virtual_cdrom_specs.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
virtual_cdrom_specs.device.backing.exclusive = False
virtual_cdrom_specs.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
virtual_cdrom_specs.device.connectable.connected = False
virtual_cdrom_specs.device.connectable.startConnected = False
virtual_cdrom_specs.device.connectable.allowGuestControl = True
return virtual_cdrom_specs
@staticmethod
def mount_iso(iso_file_path):
""" Mount an iso file at the specified location """
virtual_cdrom_specs = vim.vm.device.VirtualDeviceSpec()
virtual_cdrom_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
virtual_cdrom_specs.device = vim.vm.device.VirtualCdrom()
# controllerKey is tied to IDE Controller
virtual_cdrom_specs.device.controllerKey = 201
# key is needed to mount the iso, need to verify if this value
# changes per host, and if so, then logic needs to be added to
# obtain it
virtual_cdrom_specs.device.key = 3002
virtual_cdrom_specs.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
virtual_cdrom_specs.device.backing.fileName = iso_file_path
virtual_cdrom_specs.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
virtual_cdrom_specs.device.connectable.connected = True
virtual_cdrom_specs.device.connectable.startConnected = True
virtual_cdrom_specs.device.connectable.allowGuestControl = True
return virtual_cdrom_specs
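    # Example, as a sketch ("iso/install.iso" is a placeholder datastore path):
    #
    #   cd_spec = PyVmomiHelper.mount_iso("[MTB_INFRA] iso/install.iso")
    #   task = vm.ReconfigVM_Task(vim.vm.ConfigSpec(deviceChange=[cd_spec]))
    #   PyVmomiHelper.WaitTask(task)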
@staticmethod
def unmount_iso():
""" Mount an iso file at the specified location """
virtual_cdrom_specs = vim.vm.device.VirtualDeviceSpec()
virtual_cdrom_specs.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
virtual_cdrom_specs.device = vim.vm.device.VirtualCdrom()
# controllerKey is tied to IDE Controller
virtual_cdrom_specs.device.controllerKey = 201
# key is needed to mount the iso, need to verify if this value
# changes per host, and if so, then logic needs to be added to
# obtain it
virtual_cdrom_specs.device.key = 3002
virtual_cdrom_specs.device.backing = vim.vm.device.VirtualCdrom.RemoteAtapiBackingInfo()
virtual_cdrom_specs.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
virtual_cdrom_specs.device.connectable.connected = False
virtual_cdrom_specs.device.connectable.startConnected = False
virtual_cdrom_specs.device.connectable.allowGuestControl = False
return virtual_cdrom_specs
@staticmethod
def add_note(note, vm):
note_spec = vim.vm.ConfigSpec()
note_spec.annotation = note
add_note_task = vm.ReconfigVM_Task(note_spec)
PyVmomiHelper.WaitTask(add_note_task)
if __name__ == '__main__':
    vcenter_service = connect.SmartConnect(
host='mtxvcef01',
user='xx',
pwd='yy',
port=443
)
logger.info("preparing virtual host creation config...")
content = vcenter_service.RetrieveContent()
datacenter = content.rootFolder.childEntity[0]
vm_folder = datacenter.vmFolder
hosts = datacenter.hostFolder.childEntity
resource_pool = hosts[0].resourcePool
pprint(resource_pool)
pprint(PyVmomiHelper._get_all_objs(vcenter_service.RetrieveContent(), [vim.Datastore]))
esx = PyVmomiHelper.get_host_by_name(vcenter_service, '10.180.201.133')
#esx = vcenter_service.content.searchIndex.FindByIp(None, '10.180.201.133', False)
logger.debug(esx)
devices_creation = [
PyVmomiHelper.create_hdd_controller(),
PyVmomiHelper.create_hdd(51000000),
PyVmomiHelper.create_pci_controller(),
PyVmomiHelper.create_network_adapter(
'VirtualVmxnet3',
'VM Network 32'
),
PyVmomiHelper.create_cdrom_drive()
]
vmx_file = vim.vm.FileInfo(
logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[{ds}]{vm}".format(
ds='MTB_INFRA',
vm='mtbtstf01'
)
)
vm_config = vim.vm.ConfigSpec(
name='mtbtstf01',
memoryMB=4096,
numCPUs=4,
guestId=vim.vm.GuestOsDescriptor.GuestOsIdentifier.otherLinux64Guest,
files=vmx_file,
version='vmx-08',
deviceChange=devices_creation
)
logger.debug("creation specs generated")
logger.debug("pushing creation specs to vcenter")
    # does not work
create_task = datacenter.vmFolder.CreateVM_Task(
config=vm_config,
pool=resource_pool,
host=esx
)
    # works
#create_task = datacenter.vmFolder.CreateVM_Task(
# config=vm_config,
# pool=resource_pool,
#)
PyVmomiHelper.WaitTask(create_task)
vm = PyVmomiHelper.find_vm_by_name(vcenter_service, 'mtbtstf01')
logger.debug(PyVmomiHelper.find_mac_address(vm))
#relocate_spec = vim.vm.RelocateSpec(host=esx)
#vm.Relocate(relocate_spec)
#logger.debug(PyVmomiHelper.find_mac_address(vm))
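    # Close the vCenter session once done; standard pyVim cleanup, not part of
    # the original run shown above.
    connect.Disconnect(vcenter_service)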