Skip to content

Instantly share code, notes, and snippets.

@cloudnull
Last active January 21, 2022 17:15
Show Gist options
  • Star 3 You must be signed in to star a gist
  • Fork 2 You must be signed in to fork a gist
  • Save cloudnull/c4edb01fa28d3f57994bbb791e634a75 to your computer and use it in GitHub Desktop.
VM and vBMC Setup - installation and setup example
# Install TripleO repos.
# Scrape the RDO trunk index page for the latest python2-tripleo-repos RPM.
# NOTE(review): the awk field index ($8) depends on the exact HTML layout of
# the directory listing — fragile; verify if the page format changes.
PKG=$(curl -s https://trunk.rdoproject.org/centos7/current/ | grep python2-tripleo-repos- | awk -F'"' '{print $8}')
# Install the repo RPM, bring the system up to date, and install the client.
yum install -y "https://trunk.rdoproject.org/centos7/current/${PKG}"
yum -y upgrade
yum install -y python-tripleoclient
# Fix resolvers: run systemd-resolved and point /etc/resolv.conf at its
# managed resolv.conf.
yum install -y systemd-resolved   # -y was missing: would prompt and hang a scripted run
systemctl enable systemd-resolved
systemctl start systemd-resolved
systemctl status systemd-resolved
ln -sf /var/run/systemd/resolve/resolv.conf /etc/resolv.conf
{
"nodes": [
{
"name": "overcloud-controller1",
"pm_type": "ipmi",
"ports": [
{
"address": "52:54:00:cd:7e:ee",
"physical_network": "provisioning"
}
],
"cpu": "4",
"memory": "5120",
"disk": "96",
"arch": "x86_64",
"pm_user": "admin",
"pm_password": "secrete",
"pm_addr": "127.0.0.1",
"_comment": "overcloud controller 1 on undercloud node"
},
{
"name": "compute2",
"pm_type": "ipmi",
"ports": [
{
"address": "00:25:90:70:ca:2b",
"physical_network": "provisioning"
}
],
"cpu": "32",
"memory": "65536",
"disk": "50",
"arch": "x86_64",
"pm_user": "ADMIN",
"pm_password": "ADMIN",
"pm_addr": "172.16.27.147",
"_comment": "SuperMicro Compute2"
}
]
}
# Persist the virtio and 802.1q kernel modules across reboots via
# systemd's modules-load.d, then load them immediately.
cat > /etc/modules-load.d/virtio.conf <<EOF
virtio_rng
virtio_balloon
virtio_scsi
virtio_net
virtio_ring
virtio
EOF
cat > /etc/modules-load.d/8021q.conf <<EOF
8021q
EOF
# Load every module listed in modules-load.d right now. Comments and blank
# lines are filtered out. --first-time is dropped: modprobe of an
# already-loaded module is a no-op, but with --first-time it is an error,
# which made re-running this script fail spuriously.
grep -hv -e '^#' -e '^$' /etc/modules-load.d/*.conf | xargs -n 1 modprobe
# Create the provisioning VLAN interface (VLAN 10 on em1).
nmcli con add type vlan ifname VLAN10 dev em1 id 10 ip4 172.17.27.216/22
# Create the "stack" deployment user with passwordless sudo.
# Guard useradd so a re-run does not fail on an existing user.
id -u stack >/dev/null 2>&1 || useradd stack
# NOTE(review): hard-coded password — replace before any production use.
echo secrete | passwd --stdin stack
# Overwrite (not append to) the dedicated sudoers drop-in so re-running the
# script does not accumulate duplicate entries.
printf 'stack ALL=(root) NOPASSWD:ALL\n' | sudo tee /etc/sudoers.d/stack
chmod 0440 /etc/sudoers.d/stack
# Fetch tripleo-validations fresh and run its "prep" playbooks locally.
yum -y install ansible
if [ -d "/opt/tripleo-validations" ]; then
  rm -rf "/opt/tripleo-validations"
fi
git clone https://git.openstack.org/openstack/tripleo-validations /opt/tripleo-validations
# "hostnamectl status" prints e.g. "Static hostname: foo"; field 3 is the name.
export UNDERCLOUD_HOST=$(hostnamectl status | awk '/hostname/ {print $3}')
# Only generate a key pair when none exists: ssh-keygen prompts before
# overwriting, which would hang a non-interactive run.
[ -f ~/.ssh/id_rsa ] || ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
cp /root/.ssh/id_rsa.pub ~/.ssh/authorized_keys
pushd /opt/tripleo-validations
# Build a one-host ansible inventory pointing at this machine's default
# source address ("ip -o r g 1" resolves the route to 1.0.0.1; field 7 is src).
printf '[undercloud]\n%s ansible_connection=local ansible_host=%s\n' \
  "$UNDERCLOUD_HOST" "$(ip -o r g 1 | awk '{print $7}')" | tee hosts
# Run every playbook that carries a "- prep" group tag.
grep -l '^\s\+-\s\+prep' -r validations | xargs -n 1 ansible-playbook -i hosts
popd
[DEFAULT]
#
# From undercloud_config
#
# List of additional architectures enabled in your cloud environment.
# The list of supported values is: ppc64le (list value)
#additional_architectures =
# The certmonger nickname of the CA from which the certificate will be
# requested. This is used only if the generate_service_certificate
# option is set. Note that if the "local" CA is selected the
# certmonger's local CA certificate will be extracted to /etc/pki/ca-
# trust/source/anchors/cm-local-ca.pem and subsequently added to the
# trust chain. (string value)
#certificate_generation_ca = local
# Whether to clean overcloud nodes (wipe the hard drive) between
# deployments and after the introspection. (boolean value)
clean_nodes = false
# Cleanup temporary files. Setting this to False will leave the
# temporary files used during deployment in place after the command is
# run. This is useful for debugging the generated files or if errors
# occur. (boolean value)
cleanup = false
# Container CLI used for deployment; Can be docker or podman. (string
# value)
container_cli = podman
# Whether or not we disable the container healthchecks. (boolean
# value)
#container_healthcheck_disabled = false
# Heat environment file with parameters for all required container
# images. Or alternatively, parameter "ContainerImagePrepare" to drive
# the required image preparation. (string value)
#container_images_file =
# Used to add custom insecure registries for containers. (list value)
# Deprecated group/name - [DEFAULT]/docker_insecure_registries
#container_insecure_registries =
# An optional container registry mirror that will be used. (string
# value)
# Deprecated group/name - [DEFAULT]/docker_registry_mirror
#container_registry_mirror =
# List of any custom environment yaml files to use. These are applied
# after any other configuration and can be used to override any
# derived values. This should be used only by advanced users. (list
# value)
#custom_env_files =
# User used to run openstack undercloud install command which will be
# used to add the user to the docker group, required to upload
# containers (string value)
#deployment_user = <None>
# The default driver or hardware type to use for newly discovered
# nodes (requires enable_node_discovery set to True). It is
# automatically added to enabled_hardware_types. (string value)
discovery_default_driver = ipmi
# DEPRECATED: Docker bridge IP for the undercloud. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#docker_bip = --bip=172.31.0.1/24
# Whether to install the Volume service. It is not currently used in
# the undercloud. (boolean value)
#enable_cinder = false
# Whether to enable the ironic service. (boolean value)
enable_ironic = true
# Whether to enable the ironic inspector service. (boolean value)
enable_ironic_inspector = true
# Whether to enable the mistral service. (boolean value)
enable_mistral = true
# Makes ironic-inspector enroll any unknown node that PXE-boots
# introspection ramdisk in Ironic. By default, the "fake" driver is
# used for new nodes (it is automatically enabled when this option is
# set to True). Set discovery_default_driver to override.
# Introspection rules can also be used to specify driver information
# for newly enrolled nodes. (boolean value)
#enable_node_discovery = false
# Whether to install novajoin metadata service in the Undercloud.
# (boolean value)
#enable_novajoin = false
# Enable support for routed ctlplane networks. (boolean value)
#enable_routed_networks = false
# Whether to enable Swift encryption at-rest or not. (boolean value)
#enable_swift_encryption = false
# Whether to install Telemetry services (ceilometer, gnocchi, aodh,
# panko ) in the Undercloud. (boolean value)
enable_telemetry = false
# Whether to install Tempest in the Undercloud.This is a no-op for
# containerized undercloud. (boolean value)
#enable_tempest = true
# DEPRECATED: Whether to install the TripleO UI. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#enable_ui = true
# Whether to install requirements to run the TripleO validations.
# (boolean value)
#enable_validations = true
# Whether to enable the zaqar service. (boolean value)
enable_zaqar = true
# List of enabled bare metal hardware types (next generation drivers).
# Supported values include: ipmi, redfish, ilo, idrac. (list value)
enabled_hardware_types = ipmi
# When set to True, an SSL certificate will be generated as part of
# the undercloud install and this certificate will be used in place of
# the value for undercloud_service_certificate. The resulting
# certificate will be written to
# /etc/pki/tls/certs/undercloud-[undercloud_public_host].pem. This
# certificate is signed by CA selected by the
# "certificate_generation_ca" option. (boolean value)
#generate_service_certificate = true
# URL for the heat container image to use. (string value)
#heat_container_image =
# Execute the heat-all process natively on this host. This option
# requires that the heat-all binaries be installed locally on this
# machine. This option is enabled by default which means heat-all is
# executed on the host OS directly. (boolean value)
#heat_native = true
# Path to hieradata override file. Relative paths get computed inside
# of $HOME. When it points to a heat env file, it is passed in t-h-t
# via "-e <file>", as is. When the file contains legacy instack data,
# it is wrapped with UndercloudExtraConfig and also passed in for
# t-h-t as a temp file created in output_dir. Note, instack hiera data
# may be not t-h-t compatible and will highly likely require a manual
# revision. (string value)
#hieradata_override =
# Whether to enable extra hardware collection during the inspection
# process. Requires python-hardware or python-hardware-detect package
# on the introspection image. (boolean value)
#inspection_extras = true
# Network interface on which inspection dnsmasq will listen. If in
# doubt, use the default value. (string value)
# Deprecated group/name - [DEFAULT]/discovery_interface
#inspection_interface = br-ctlplane
# Whether to run benchmarks when inspecting nodes. Requires
# inspection_extras set to True. (boolean value)
# Deprecated group/name - [DEFAULT]/discovery_runbench
#inspection_runbench = false
# One Time Password to register Undercloud node with an IPA server.
# Required when enable_novajoin = True. (string value)
#ipa_otp =
# Whether to use iPXE for deploy and inspection. (boolean value)
# Deprecated group/name - [DEFAULT]/ipxe_deploy
ipxe_enabled = true
# Network interface on the Undercloud that will be handling the PXE
# boots and DHCP for Overcloud instances. (string value)
local_interface = MACVLAN1
# IP information for the interface on the Undercloud that will be
# handling the PXE boots and DHCP for Overcloud instances. The IP
# portion of the value will be assigned to the network interface
# defined by local_interface, with the netmask defined by the prefix
# portion of the value. (string value)
local_ip = 172.16.27.216/22
# MTU to use for the local_interface. (integer value)
#local_mtu = 1500
# Name of the local subnet, where the PXE boot and DHCP interfaces for
# overcloud instances is located. The IP address of the
# local_ip/local_interface should reside in this subnet. (string
# value)
local_subnet = ctlplane-subnet
# Path to network config override template.Relative paths get computed
# inside of $HOME. Must be in the json format.Its content overrides
# anything in t-h-t UndercloudNetConfigOverride. The processed
# template is then passed in Heat via the undercloud_parameters.yaml
# file created in output_dir and used to configure the networking via
# run-os-net-config. If you wish to disable you can set this location
# to an empty file.Templated for instack j2 tags may be used, for
# example:
#
# "network_config": [
# {
# "type": "ovs_bridge",
# "name": "br-ctlplane",
# "ovs_extra": [
# "br-set-external-id br-ctlplane bridge-id br-ctlplane"
# ],
# "members": [
# {
# "type": "interface",
# "name": "{{LOCAL_INTERFACE}}",
# "primary": "true",
# "mtu": {{LOCAL_MTU}},
# "dns_servers": {{UNDERCLOUD_NAMESERVERS}}
# }
# ],
# "addresses": [
# {
# "ip_netmask": "{{PUBLIC_INTERFACE_IP}}"
# }
# ],
# "routes": {{SUBNETS_STATIC_ROUTES}},
# "mtu": {{LOCAL_MTU}}
# }
# ]
# (string value)
#net_config_override =
# Networks file to override for heat. May be an absolute path or the
# path relative to the t-h-t templates directory used for deployment
# (string value)
#networks_file = <None>
# Directory to output state, processed heat templates, ansible
# deployment files. (string value)
#output_dir = /builddir
# DNS domain name to use when deploying the overcloud. The overcloud
# parameter "CloudDomain" must be set to a matching value. (string
# value)
overcloud_domain_name = rk-home
# Roles file to override for heat. May be an absolute path or the path
# relative to the t-h-t templates directory used for deployment
# (string value)
#roles_file = <None>
# Maximum number of attempts the scheduler will make when deploying
# the instance. You should keep it greater or equal to the number of
# bare metal nodes you expect to deploy at once to work around
# potential race condition when scheduling. (integer value)
# Minimum value: 1
#scheduler_max_attempts = 30
# The kerberos principal for the service that will use the
# certificate. This is only needed if your CA requires a kerberos
# principal. e.g. with FreeIPA. (string value)
#service_principal =
# List of routed network subnets for provisioning and introspection.
# Comma separated list of names/tags. For each network a section/group
# needs to be added to the configuration file with these parameters
# set: cidr, dhcp_start, dhcp_end, inspection_iprange, gateway and
# masquerade_network. Note: The section/group must be placed before or
# after any other section. (See the example section [ctlplane-subnet]
# in the sample configuration file.) (list value)
subnets = ctlplane-subnet
# heat templates file to override. (string value)
#templates =
# Virtual IP or DNS address to use for the admin endpoints of
# Undercloud services. Only used with SSL. (string value)
# Deprecated group/name - [DEFAULT]/undercloud_admin_vip
undercloud_admin_host = 172.16.27.216
# Whether to enable the debug log level for Undercloud OpenStack
# services and Container Image Prepare step. (boolean value)
#undercloud_debug = true
# Enable or disable SELinux during the deployment. (boolean value)
#undercloud_enable_selinux = true
# Fully qualified hostname (including domain) to set on the
# Undercloud. If left unset, the current hostname will be used, but
# the user is responsible for configuring all system hostname settings
# appropriately. If set, the undercloud install will configure all
# system hostname settings. (string value)
undercloud_hostname = undercloud.rk-home
# The path to a log file to store the undercloud install/upgrade logs.
# (string value)
#undercloud_log_file = install-undercloud.log
# DNS nameserver(s). Use for the undercloud node and for the overcloud
# nodes. (NOTE: To use different nameserver(s) for the overcloud,
# override the DnsServers parameter in overcloud environment.) (list
# value)
#undercloud_nameservers =
# List of ntp servers to use. (list value)
#undercloud_ntp_servers = 0.pool.ntp.org,1.pool.ntp.org,2.pool.ntp.org,3.pool.ntp.org
# Virtual IP or DNS address to use for the public endpoints of
# Undercloud services. Only used with SSL. (string value)
# Deprecated group/name - [DEFAULT]/undercloud_public_vip
undercloud_public_host = 172.17.27.216
# Certificate file to use for OpenStack service SSL connections.
# Setting this enables SSL for the OpenStack API endpoints, leaving it
# unset disables SSL. (string value)
#undercloud_service_certificate =
# Host timezone to be used. If no timezone is specified, the existing
# timezone configuration is used. (string value)
#undercloud_timezone = <None>
# Whether to update packages during the Undercloud install. This is a
# no-op for containerized undercloud. (boolean value)
#undercloud_update_packages = false
# (Experimental) Whether to clean undercloud rpms after an upgrade to
# a containerized undercloud. (boolean value)
#upgrade_cleanup = false
[ctlplane-subnet]
#
# From undercloud_config
#
# Network CIDR for the Neutron-managed subnet for Overcloud instances.
# (string value)
# Deprecated group/name - [DEFAULT]/network_cidr
cidr = 172.16.24.0/22
# End of DHCP allocation range for PXE and DHCP of Overcloud instances
# on this network. (list value)
# Deprecated group/name - [DEFAULT]/dhcp_end
dhcp_end = 172.16.25.15
# List of IP addresses or IP ranges to exclude from the subnets
# allocation pool. Example: 192.168.24.50,192.168.24.80-192.168.24.90
# (list value)
#dhcp_exclude =
# Start of DHCP allocation range for PXE and DHCP of Overcloud
# instances on this network. (list value)
# Deprecated group/name - [DEFAULT]/dhcp_start
dhcp_start = 172.16.25.1
# Network gateway for the Neutron-managed network for Overcloud
# instances on this network. (string value)
# Deprecated group/name - [DEFAULT]/network_gateway
gateway = 172.16.24.2
# Host routes for the Neutron-managed subnet for the Overcloud
# instances on this network. The host routes on the local_subnet will
# also be configured on the undercloud. (list value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#host_routes = [{destination: 10.10.10.0, nexthop: 192.168.24.1}]
# Temporary IP range that will be given to nodes on this network
# during the inspection process. Should not overlap with the range
# defined by dhcp_start and dhcp_end, but should be in the same ip
# subnet. (string value)
# Deprecated group/name - [DEFAULT]/inspection_iprange
inspection_iprange = 172.16.25.16,172.16.25.30
# The network will be masqueraded for external access. (boolean value)
masquerade = false
# Register a virtual BMC for the controller VM, start it, and verify that
# power status can be queried over IPMI through the new endpoint.
VBMC=/opt/vbmc/bin/vbmc
NODE=overcloud-controller1
"${VBMC}" add "${NODE}" --port 16001 \
          --username admin \
          --password secrete
"${VBMC}" start "${NODE}"
"${VBMC}" list
ipmitool -I lanplus -U admin -P secrete -H 127.0.0.1 -p 16001 power status
# Install virtualenv (RPM-based hosts first, Debian-based as a fallback) and
# set up virtualbmc in its own virtualenv under /opt/vbmc.
# -y added to both package managers: without it each would prompt and hang
# a scripted run.
yum install -y python-virtualenv || apt install -y virtualenv
# --system-site-packages: vbmc needs the distro's libvirt python bindings.
virtualenv --system-site-packages /opt/vbmc
/opt/vbmc/bin/pip install virtualbmc
# Install a systemd unit for the vbmc daemon and start it.
# The here-doc delimiter is quoted ('EOF') so $MAINPID reaches the unit file
# literally; with the previous unquoted delimiter the shell expanded it to an
# empty string at write time, producing "ExecReload = /bin/kill -HUP" and
# breaking reloads. Sections are also reordered to the conventional
# [Unit]/[Service]/[Install] layout (ordering is cosmetic to systemd).
cat > /etc/systemd/system/vbmcd.service <<'EOF'
[Unit]
Description = vbmc service
After = libvirtd.service
After = syslog.target
After = network.target

[Service]
BlockIOAccounting = True
CPUAccounting = True
ExecReload = /bin/kill -HUP $MAINPID
ExecStart = /opt/vbmc/bin/vbmcd --foreground
Group = root
MemoryAccounting = True
PrivateDevices = False
PrivateNetwork = False
PrivateTmp = False
PrivateUsers = False
Restart = on-failure
RestartSec = 2
Slice = vbmc.slice
TasksAccounting = True
TimeoutSec = 120
Type = simple
User = root

[Install]
WantedBy = multi-user.target
EOF
systemctl daemon-reload
systemctl enable vbmcd.service
systemctl start vbmcd.service
systemctl status vbmcd.service
# Create the controller VM: a 96G qcow2 disk plus a libvirt domain
# definition generated by virt-install in dry-run mode.
IMG=/var/lib/libvirt/images/overcloud-controller1.qcow2
XML=/etc/libvirt/qemu/overcloud-controller1.xml
qemu-img create -f qcow2 "${IMG}" 96G
# --dry-run/--print-xml: emit the domain XML without actually creating the
# guest, so it can be registered explicitly with "virsh define" below.
virt-install --ram 5120 \
             --vcpus=4,sockets=1,cores=4 \
             --os-variant rhel7 \
             --disk path="${IMG}",device=disk,bus=virtio,format=qcow2,discard=unmap \
             --graphics vnc \
             --network network:provisioning,model=virtio \
             --network network:external,model=virtio \
             --name overcloud-controller1 \
             --hvm \
             --virt-type kvm \
             --memballoon virtio \
             --cpu host \
             --dry-run \
             --rng /dev/urandom \
             --print-xml > "${XML}"
virsh define --file "${XML}"
# Gather mac information from VM(s) it will be needed later.
grep -A1 "mac address" "${XML}"
# Define the "external" libvirt network as a bridge onto em1.
# Fix: the <name> element previously read "provisioning", which collided
# with the real provisioning network and made the later
# "virsh net-autostart external" / "virsh net-start external" calls fail.
cat > /etc/libvirt/networks/external.xml <<EOF
<network>
<name>external</name>
<forward mode="bridge">
<interface dev="em1"/>
</forward>
</network>
EOF
# Define the "provisioning" libvirt network as a bridge onto the VLAN10
# interface created earlier. The delimiter is quoted purely for safety —
# the body contains no expansions, so the written file is unchanged.
cat > /etc/libvirt/networks/provisioning.xml <<'EOF'
<network>
<name>provisioning</name>
<forward mode="bridge">
<interface dev="VLAN10"/>
</forward>
</network>
EOF
# Register, autostart, and start both networks, then retire libvirt's
# default NAT network and show the result.
for net_name in external provisioning; do
  virsh net-define "/etc/libvirt/networks/${net_name}.xml"
  virsh net-autostart "${net_name}"
  virsh net-start "${net_name}"
done
virsh net-destroy default
virsh net-undefine default
virsh net-list
# Debian/Ubuntu host dependencies (-y added: avoid an interactive prompt).
apt install -y virtinst ipmitool python-dev
# Install libvirt packages (RHEL/CentOS; -y added for non-interactive runs).
yum install -y qemu-kvm qemu-img libvirt libvirt-python libvirt-client virt-install bridge-utils ipmitool OpenIPMI
# Start libvirt now and on boot.
systemctl enable libvirtd.service
systemctl start libvirtd.service
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment