Skip to content

Instantly share code, notes, and snippets.

@raphapr
Last active August 29, 2015 14:02
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save raphapr/8e7896a738c6f6e6d27d to your computer and use it in GitHub Desktop.
compute node — nova-compute log excerpt (followed by neutron ML2 / neutron.conf / nova.conf configuration snippets):
2014-06-26 15:04:21.748 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.748 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.748 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager.update_available_resource run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.749 13970 DEBUG nova.openstack.common.lockutils [-] Got semaphore "compute_resources" lock /usr/lib/python2.6/site-packages/nova/openstack/common/lockutils.py:168
2014-06-26 15:04:21.749 13970 DEBUG nova.openstack.common.lockutils [-] Got semaphore / lock "update_available_resource" inner /usr/lib/python2.6/site-packages/nova/openstack/common/lockutils.py:248
2014-06-26 15:04:21.749 13970 AUDIT nova.compute.resource_tracker [-] Auditing locally available compute resources
2014-06-26 15:04:21.749 13970 DEBUG nova.virt.libvirt.driver [-] Updating host stats update_status /usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py:5247
2014-06-26 15:04:21.843 13970 DEBUG nova.compute.resource_tracker [-] Hypervisor: free ram (MB): 23433 _report_hypervisor_resource_view /usr/lib/python2.6/site-packages/nova/compute/resource_tracker.py:409
2014-06-26 15:04:21.843 13970 DEBUG nova.compute.resource_tracker [-] Hypervisor: free disk (GB): 132 _report_hypervisor_resource_view /usr/lib/python2.6/site-packages/nova/compute/resource_tracker.py:410
2014-06-26 15:04:21.843 13970 DEBUG nova.compute.resource_tracker [-] Hypervisor: free VCPUs: 16 _report_hypervisor_resource_view /usr/lib/python2.6/site-packages/nova/compute/resource_tracker.py:415
2014-06-26 15:04:21.843 13970 DEBUG nova.compute.resource_tracker [-] Hypervisor: assignable PCI devices: [] _report_hypervisor_resource_view /usr/lib/python2.6/site-packages/nova/compute/resource_tracker.py:422
2014-06-26 15:04:21.907 13970 AUDIT nova.compute.resource_tracker [-] Free ram (MB): 22485
2014-06-26 15:04:21.907 13970 AUDIT nova.compute.resource_tracker [-] Free disk (GB): 132
2014-06-26 15:04:21.908 13970 AUDIT nova.compute.resource_tracker [-] Free VCPUS: 14
2014-06-26 15:04:21.955 13970 INFO nova.compute.resource_tracker [-] Compute_service record updated for compute1:compute1
2014-06-26 15:04:21.956 13970 DEBUG nova.openstack.common.lockutils [-] Semaphore / lock released "update_available_resource" inner /usr/lib/python2.6/site-packages/nova/openstack/common/lockutils.py:252
2014-06-26 15:04:21.983 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.983 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.983 13970 DEBUG nova.compute.manager [-] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python2.6/site-packages/nova/compute/manager.py:5364
2014-06-26 15:04:21.984 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.984 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.984 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._poll_bandwidth_usage run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:21.984 13970 INFO nova.compute.manager [-] Updating bandwidth usage cache
2014-06-26 15:04:22.024 13970 WARNING nova.compute.manager [-] Bandwidth usage not supported by hypervisor.
2014-06-26 15:04:22.024 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:22.024 13970 DEBUG nova.compute.manager [-] Cleaning up deleted instances _run_pending_deletes /usr/lib/python2.6/site-packages/nova/compute/manager.py:5642
2014-06-26 15:04:22.060 13970 DEBUG nova.compute.manager [-] There are 1 instances to clean _run_pending_deletes /usr/lib/python2.6/site-packages/nova/compute/manager.py:5651
2014-06-26 15:04:22.060 13970 DEBUG nova.compute.manager [-] [instance: 87f6030f-6f3b-47df-8f6f-41c2928a51d1] Instance has had 5 of 5 cleanup attempts _run_pending_deletes /usr/lib/python2.6/site-packages/nova/compute/manager.py:5659
2014-06-26 15:04:22.060 13970 DEBUG nova.openstack.common.periodic_task [-] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python2.6/site-packages/nova/openstack/common/periodic_task.py:178
2014-06-26 15:04:22.060 13970 DEBUG nova.openstack.common.loopingcall [-] Dynamic looping call sleeping for 3.51 seconds _inner /usr/lib/python2.6/site-packages/nova/openstack/common/loopingcall.py:132
2014-06-26 15:04:24.874 13970 INFO nova.openstack.common.service [-] Caught SIGTERM, exiting
2014-06-26 15:04:25.549 18877 INFO oslo.messaging._drivers.impl_qpid [-] Connected to AMQP server on controller:5672
2014-06-26 15:04:25.558 18877 INFO oslo.messaging._drivers.impl_qpid [-] Connected to AMQP server on controller:5672
2014-06-26 15:04:26.257 18877 INFO oslo.messaging._drivers.impl_qpid [-] Connected to AMQP server on controller:5672
2014-06-26 15:04:48.367 18877 WARNING nova.virt.disk.vfs.guestfs [req-1b93e815-b8ee-457a-9eac-2d30aa51c031 8d8b6dbdacc6402b960b964b00bf8d14 30f220b0dca34241b9e4feb0bd117fe8] Failed to close augeas aug_close: do_aug_close: you must call 'aug-init' first to initialize Augeas
2014-06-26 15:04:48.612 18877 ERROR nova.compute.manager [req-1b93e815-b8ee-457a-9eac-2d30aa51c031 8d8b6dbdacc6402b960b964b00bf8d14 30f220b0dca34241b9e4feb0bd117fe8] [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] Instance failed to spawn
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] Traceback (most recent call last):
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/compute/manager.py", line 1714, in _spawn
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] block_device_info)
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 2262, in spawn
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] write_to_disk=True)
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 3443, in to_xml
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] disk_info, rescue, block_device_info)
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 3259, in get_guest_config
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] flavor)
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/vif.py", line 397, in get_config
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] _("Unexpected vif_type=%s") % vif_type)
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] NovaException: Unexpected vif_type=binding_failed
2014-06-26 15:04:48.612 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b]
2014-06-26 15:04:48.722 18877 ERROR nova.virt.libvirt.driver [-] [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] During wait destroy, instance disappeared.
2014-06-26 15:04:49.300 18877 ERROR nova.compute.manager [req-1b93e815-b8ee-457a-9eac-2d30aa51c031 8d8b6dbdacc6402b960b964b00bf8d14 30f220b0dca34241b9e4feb0bd117fe8] [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] Error: Unexpected vif_type=binding_failed
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] Traceback (most recent call last):
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/compute/manager.py", line 1305, in _build_instance
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] set_access_ip=set_access_ip)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/compute/manager.py", line 393, in decorated_function
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] return function(self, context, *args, **kwargs)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/compute/manager.py", line 1717, in _spawn
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] LOG.exception(_('Instance failed to spawn'), instance=instance)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/openstack/common/excutils.py", line 68, in __exit__
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] six.reraise(self.type_, self.value, self.tb)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/compute/manager.py", line 1714, in _spawn
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] block_device_info)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 2262, in spawn
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] write_to_disk=True)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 3443, in to_xml
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] disk_info, rescue, block_device_info)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/driver.py", line 3259, in get_guest_config
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] flavor)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] File "/usr/lib/python2.6/site-packages/nova/virt/libvirt/vif.py", line 397, in get_config
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] _("Unexpected vif_type=%s") % vif_type)
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b] NovaException: Unexpected vif_type=binding_failed
2014-06-26 15:04:49.300 18877 TRACE nova.compute.manager [instance: 18a4d886-ac86-488d-ae1c-54dedf3bd22b]
2014-06-26 15:04:54.680 18877 WARNING nova.compute.manager [req-ac4904d4-3c9c-4147-993d-5ffeb4243073 None None] Found 3 in the database and 0 on the hypervisor
[ml2]
type_drivers = gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[ovs]
local_ip = 10.0.1.31
tunnel_type = gre
enable_tunneling = True
[database]
connection = mysql://neutron:password@controller/neutron
[DEFAULT]
auth_strategy = keystone
rpc_backend = neutron.openstack.common.rpc.impl_qpid
qpid_hostname = controller
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
verbose = True
notification_driver = neutron.openstack.common.notifier.rpc_notifier
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[quotas]
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
[keystone_authtoken]
auth_uri = http://controller:5000
auth_host = controller
auth_protocol = http
auth_port = 35357
admin_tenant_name = service
admin_user = neutron
admin_password = password
[database]
[service_providers]
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
[DEFAULT]
auth_strategy = keystone
rpc_backend = qpid
qpid_hostname = controller
my_ip = 10.0.0.31
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.31
novncproxy_base_url = http://controller:6080/vnc_auto.html
glance_host = controller
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://controller:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = 91b684dbdf5bcffef3b5
neutron_admin_auth_url = http://controller:35357/v2.0
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
service_down_time=120
compute_driver=libvirt.LibvirtDriver
vif_plugging_is_fatal=false
vif_plugging_timeout=0
[baremetal]
[cells]
[conductor]
[database]
connection = mysql://nova:30b3ecd8302385e7a7b6@controller/nova
[hyperv]
[image_file_url]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_host = controller
auth_protocol = http
auth_port = 35357
admin_user = nova
admin_tenant_name = service
admin_password = e0e7d181d63b10adfb2b
[libvirt]
virt_type=qemu
[matchmaker_ring]
[metrics]
[osapi_v3]
[rdp]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[xenserver]
[zookeeper]
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment