MariaDB [nova_api]> select * from cell_mappings;
+---------------------+------------+----+--------------------------------------+-------+----------------------------------------+------------------------------------------------------------+
| created_at          | updated_at | id | uuid                                 | name  | transport_url                          | database_connection                                        |
+---------------------+------------+----+--------------------------------------+-------+----------------------------------------+------------------------------------------------------------+
| 2017-02-27 20:14:18 | NULL       | 1  | 00000000-0000-0000-0000-000000000000 | cell0 | none:///                               | mysql+pymysql://nova:linuxacademy123@controller/nova_cell0 |
| 2017-02-27 20:16:04 | NULL       | 2  | 143df967-eb22-4d2c-baff-bfc2538816fd | cell1 | rabbit://openstack:rabbitmq@controller | mysql+pymysql://nova:linuxacademy123@controller/nova       |
+---------------------+------------+----+--------------------------------------+-------+----------------------------------------+------------------------------------------------------------+
root@controller:~# nova-manage cell_v2 discover_hosts --verbose
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting compute nodes from cell 'cell1': 143df967-eb22-4d2c-baff-bfc2538816fd
Found 1 computes in cell: 143df967-eb22-4d2c-baff-bfc2538816fd
Checking host mapping for compute host 'compute': 2e37ecce-5d56-4cf1-ab98-0a697b9373d2
MariaDB [nova_api]> select * from host_mappings;
+---------------------+------------+----+---------+---------+
| created_at | updated_at | id | cell_id | host |
+---------------------+------------+----+---------+---------+
| 2017-02-28 15:31:57 | NULL | 1 | 2 | compute |
+---------------------+------------+----+---------+---------+
1 row in set (0.00 sec)
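The mapping can also be sanity-checked from the CLI rather than MariaDB; for example, confirming the nova-compute service itself is registered and up (output elided, and assuming admin credentials are loaded in the shell):

root@controller:~# openstack compute service list --service nova-compute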
nova-api.log - controller
2017-02-28 09:29:45.564 2470 WARNING oslo_reports.guru_meditation_report [-] Guru meditation now registers SIGUSR1 and SIGUSR2 by default for backward compatibility. SIGUSR1 will no longer be registered in a future release, so please use SIGUSR2 to generate reports.
2017-02-28 09:29:46.469 2470 WARNING oslo_config.cfg [-] Option "use_neutron" from group "DEFAULT" is deprecated for removal. Its value may be silently ignored in the future.
2017-02-28 09:29:47.411 2470 INFO nova.api.openstack [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Loaded extensions: ['extensions', 'flavors', 'image-metadata', 'image-size', 'images', 'ips', 'limits', 'os-admin-actions', 'os-admin-password', 'os-agents', 'os-aggregates', 'os-assisted-volume-snapshots', 'os-attach-interfaces', 'os-availability-zone', 'os-baremetal-nodes', 'os-block-device-mapping', 'os-cells', 'os-certificates', 'os-cloudpipe', 'os-config-drive', 'os-console-auth-tokens', 'os-console-output', 'os-consoles', 'os-create-backup', 'os-deferred-delete', 'os-evacuate', 'os-extended-availability-zone', 'os-extended-server-attributes', 'os-extended-status', 'os-extended-volumes', 'os-fixed-ips', 'os-flavor-access', 'os-flavor-extra-specs', 'os-flavor-manage', 'os-flavor-rxtx', 'os-floating-ip-dns', 'os-floating-ip-pools', 'os-floating-ips', 'os-floating-ips-bulk', 'os-fping', 'os-hide-server-addresses', 'os-hosts', 'os-hypervisors', 'os-instance-actions', 'os-instance-usage-audit-log', 'os-keypairs', 'os-lock-server', 'os-migrate-server', 'os-migrations', 'os-multinic', 'os-multiple-create', 'os-networks', 'os-networks-associate', 'os-pause-server', 'os-quota-class-sets', 'os-quota-sets', 'os-remote-consoles', 'os-rescue', 'os-scheduler-hints', 'os-security-group-default-rules', 'os-security-groups', 'os-server-diagnostics', 'os-server-external-events', 'os-server-groups', 'os-server-password', 'os-server-tags', 'os-server-usage', 'os-services', 'os-shelve', 'os-simple-tenant-usage', 'os-suspend-server', 'os-tenant-networks', 'os-used-limits', 'os-user-data', 'os-virtual-interfaces', 'os-volumes', 'server-metadata', 'server-migrations', 'servers', 'versions']
2017-02-28 09:29:47.414 2470 WARNING keystonemiddleware.auth_token [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] AuthToken middleware is set with keystone_authtoken.service_token_roles_required set to False. This is backwards compatible but deprecated behaviour. Please set this to True.
2017-02-28 09:29:48.103 2470 INFO nova.api.openstack [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Loaded extensions: ['extensions', 'flavors', 'image-metadata', 'image-size', 'images', 'ips', 'limits', 'os-admin-actions', 'os-admin-password', 'os-agents', 'os-aggregates', 'os-assisted-volume-snapshots', 'os-attach-interfaces', 'os-availability-zone', 'os-baremetal-nodes', 'os-block-device-mapping', 'os-cells', 'os-certificates', 'os-cloudpipe', 'os-config-drive', 'os-console-auth-tokens', 'os-console-output', 'os-consoles', 'os-create-backup', 'os-deferred-delete', 'os-evacuate', 'os-extended-availability-zone', 'os-extended-server-attributes', 'os-extended-status', 'os-extended-volumes', 'os-fixed-ips', 'os-flavor-access', 'os-flavor-extra-specs', 'os-flavor-manage', 'os-flavor-rxtx', 'os-floating-ip-dns', 'os-floating-ip-pools', 'os-floating-ips', 'os-floating-ips-bulk', 'os-fping', 'os-hide-server-addresses', 'os-hosts', 'os-hypervisors', 'os-instance-actions', 'os-instance-usage-audit-log', 'os-keypairs', 'os-lock-server', 'os-migrate-server', 'os-migrations', 'os-multinic', 'os-multiple-create', 'os-networks', 'os-networks-associate', 'os-pause-server', 'os-quota-class-sets', 'os-quota-sets', 'os-remote-consoles', 'os-rescue', 'os-scheduler-hints', 'os-security-group-default-rules', 'os-security-groups', 'os-server-diagnostics', 'os-server-external-events', 'os-server-groups', 'os-server-password', 'os-server-tags', 'os-server-usage', 'os-services', 'os-shelve', 'os-simple-tenant-usage', 'os-suspend-server', 'os-tenant-networks', 'os-used-limits', 'os-user-data', 'os-virtual-interfaces', 'os-volumes', 'server-metadata', 'server-migrations', 'servers', 'versions']
2017-02-28 09:29:48.106 2470 WARNING keystonemiddleware.auth_token [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] AuthToken middleware is set with keystone_authtoken.service_token_roles_required set to False. This is backwards compatible but deprecated behaviour. Please set this to True.
2017-02-28 09:29:48.163 2470 INFO nova.wsgi [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] osapi_compute listening on 0.0.0.0:8774
2017-02-28 09:29:48.168 2470 INFO oslo_service.service [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Starting 1 workers
2017-02-28 09:29:48.184 2470 INFO nova.network.driver [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Loading network driver 'nova.network.linux_net'
2017-02-28 09:29:49.585 2856 INFO nova.osapi_compute.wsgi.server [req-11e649d3-059b-48c1-bf75-2a2ea139544a - - - - -] (2856) wsgi starting up on http://0.0.0.0:8774
2017-02-28 09:29:50.947 2470 INFO nova.wsgi [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] metadata listening on 0.0.0.0:8775
2017-02-28 09:29:50.947 2470 INFO oslo_service.service [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Starting 1 workers
2017-02-28 09:29:50.962 2470 WARNING oslo_config.cfg [req-4f5f5065-7e18-4932-8ac7-48de6bcfbfa0 - - - - -] Option "force_dhcp_release" from group "DEFAULT" is deprecated for removal. Its value may be silently ignored in the future.
2017-02-28 09:29:51.757 2897 INFO nova.metadata.wsgi.server [req-37e2480f-b0de-4966-b8fc-817a2b39a322 - - - - -] (2897) wsgi starting up on http://0.0.0.0:8775
2017-02-28 09:54:02.216 2856 INFO nova.api.openstack.wsgi [req-8f950dc3-9cd7-40ba-8d5d-07743cb40aa4 - - - - -] HTTP exception thrown: Flavor m1.nano could not be found.
2017-02-28 09:54:02.219 2856 INFO nova.osapi_compute.wsgi.server [req-8f950dc3-9cd7-40ba-8d5d-07743cb40aa4 - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/flavors/m1.nano HTTP/1.1" status: 404 len: 434 time: 0.8956139
2017-02-28 09:54:02.383 2856 INFO nova.api.openstack.wsgi [req-2ca503bc-31e0-464f-924f-75f8c30d6104 - - - - -] HTTP exception thrown: Flavor m1.nano could not be found.
2017-02-28 09:54:02.386 2856 INFO nova.osapi_compute.wsgi.server [req-2ca503bc-31e0-464f-924f-75f8c30d6104 - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/flavors/m1.nano HTTP/1.1" status: 404 len: 434 time: 0.1665001
2017-02-28 09:54:02.572 2856 INFO nova.osapi_compute.wsgi.server [req-d106d358-7e57-4ef0-a765-0fa1ec0ff584 - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/flavors HTTP/1.1" status: 200 len: 586 time: 0.1855500
2017-02-28 09:54:02.759 2856 INFO nova.osapi_compute.wsgi.server [req-c50dd86b-89ee-4ddd-bfe2-8eca23439d63 - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/flavors/0 HTTP/1.1" status: 200 len: 752 time: 0.1784899
2017-02-28 09:54:04.835 2856 INFO nova.osapi_compute.wsgi.server [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] 127.0.0.1 "POST /v2.1/84c526841fec4019976d32b0318626a3/servers HTTP/1.1" status: 202 len: 894 time: 1.8252602
2017-02-28 09:54:06.164 2856 INFO nova.osapi_compute.wsgi.server [req-63fc77fd-599a-4e80-873a-be9cd4a0ce17 - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/servers/8a32caa8-31c9-4565-a860-cce7c5766542 HTTP/1.1" status: 200 len: 1749 time: 1.3289468
2017-02-28 09:54:06.951 2856 INFO nova.osapi_compute.wsgi.server [req-4b6a2be6-ce02-4a41-8a5e-5d687ff6291c - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/flavors/0 HTTP/1.1" status: 200 len: 752 time: 0.3388889
2017-02-28 09:54:16.435 2856 INFO nova.osapi_compute.wsgi.server [req-785a070c-1843-40a7-842d-72ec378b214e - - - - -] 127.0.0.1 "GET /v2.1/84c526841fec4019976d32b0318626a3/servers/detail HTTP/1.1" status: 200 len: 3139 time: 0.7085688
nova-scheduler - controller
2017-02-28 09:54:02.788 2204 INFO nova.scheduler.host_manager [req-6ca7e3a8-721f-4e12-90fc-b31212f3a355 - - - - -] Successfully synced instances from host 'compute'.
2017-02-28 09:54:09.326 2204 INFO nova.filters [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] Filter RetryFilter returned 0 hosts
2017-02-28 09:54:09.327 2204 INFO nova.filters [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] Filtering removed all hosts for the request with instance ID '8a32caa8-31c9-4565-a860-cce7c5766542'. Filter results: ['RetryFilter: (start: 0, end: 0)']
2017-02-28 09:56:02.768 2204 INFO nova.scheduler.host_manager [req-5094d888-965c-4faa-a7bc-084cba2f7896 - - - - -] Successfully synced instances from host 'compute'.
2017-02-28 09:58:06.764 2204 INFO nova.scheduler.host_manager [req-bfce1d49-702e-4944-9595-a40aa54d8466 - - - - -] Successfully synced instances from host 'compute'.
nova-conductor
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] Failed to schedule instances
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager Traceback (most recent call last):
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/conductor/manager.py", line 866, in schedule_and_build_instances
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager request_specs[0].to_legacy_filter_properties_dict())
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/conductor/manager.py", line 597, in _schedule_instances
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager hosts = self.scheduler_client.select_destinations(context, spec_obj)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/utils.py", line 371, in wrapped
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return func(*args, **kwargs)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/client/init.py", line 51, in select_destinations
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return self.queryclient.select_destinations(context, spec_obj)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/client/init.py", line 37, in __run_method
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return getattr(self.instance, __name)(*args, **kwargs)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/client/query.py", line 32, in select_destinations
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return self.scheduler_rpcapi.select_destinations(context, spec_obj)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/rpcapi.py", line 129, in select_destinations
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return cctxt.call(ctxt, 'select_destinations', **msg_args)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/oslo_messaging/rpc/client.py", line 169, in call
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager retry=self.retry)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/oslo_messaging/transport.py", line 97, in _send
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager timeout=timeout, retry=retry)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py", line 458, in send
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager retry=retry)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py", line 449, in _send
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager raise result
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager NoValidHost_Remote: No valid host was found. There are not enough hosts available.
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager Traceback (most recent call last):
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/oslo_messaging/rpc/server.py", line 218, in inner
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager return func(*args, **kwargs)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/manager.py", line 98, in select_destinations
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager dests = self.driver.select_destinations(ctxt, spec_obj)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager File "/usr/lib/python2.7/dist-packages/nova/scheduler/filter_scheduler.py", line 79, in select_destinations
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager raise exception.NoValidHost(reason=reason)
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager NoValidHost: No valid host was found. There are not enough hosts available.
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.340 2428 ERROR nova.conductor.manager
2017-02-28 09:54:09.389 2428 WARNING nova.scheduler.utils [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] Failed to compute_task_build_instances: No valid host was found. There are not enough hosts available.
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/oslo_messaging/rpc/server.py", line 218, in inner
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/nova/scheduler/manager.py", line 98, in select_destinations
dests = self.driver.select_destinations(ctxt, spec_obj)
File "/usr/lib/python2.7/dist-packages/nova/scheduler/filter_scheduler.py", line 79, in select_destinations
raise exception.NoValidHost(reason=reason)
NoValidHost: No valid host was found. There are not enough hosts available.
2017-02-28 09:54:09.389 2428 WARNING nova.scheduler.utils [req-48ce8c81-5313-4dd6-b43e-5b6775a50d86 - - - - -] [instance: 8a32caa8-31c9-4565-a860-cce7c5766542] Setting instance to ERROR state.
nova-compute.log - compute
2017-02-28 09:58:07.852 1621 WARNING nova.scheduler.client.report [req-62fa3550-e8f7-4111-a297-6b24b20a7e1f - - - - -] The placement API endpoint not found. Placement is optional in Newton, but required in Ocata. Please enable the placement service before upgrading.
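This warning points at the likely root cause of the NoValidHost error above: in Ocata the scheduler relies on the Placement API, so a compute node that cannot reach placement never gets its resources registered, and every scheduling request starts from zero hosts (hence "RetryFilter: (start: 0, end: 0)"). A minimal sketch of the [placement] section for nova.conf on the compute node, following the Ocata install guide layout; the region, service user, and password below are placeholders, not values from this deployment:

[placement]
# placeholder credentials - substitute your own keystone values
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = PLACEMENT_PASS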
Database connections used in nova.conf:
nova database:
mysql+pymysql://nova:linuxacademy123@controller/nova
nova_api database (shared by nova_api and placement):
mysql+pymysql://nova:linuxacademy123@controller/nova_api
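Those two strings map onto nova.conf like so (section names per the standard layout; in this release placement shares the nova_api database rather than having its own):

[api_database]
connection = mysql+pymysql://nova:linuxacademy123@controller/nova_api

[database]
connection = mysql+pymysql://nova:linuxacademy123@controller/nova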
From nova.conf:
[DEFAULT]
DEPRECATED:
When returning instance metadata, this is the class that is used
for getting vendor metadata when that class isn't specified in the individual
request. The value should be the full dot-separated path to the class to use.
Possible values:
* Any valid dot-separated class path that can be imported.
(string value)
This option is deprecated for removal since 13.0.0.
Its value may be silently ignored in the future.
#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
DEPRECATED:
This option is used to enable or disable quota checking for tenant networks.
Related options:
* quota_networks
(boolean value)
This option is deprecated for removal since 14.0.0.
Its value may be silently ignored in the future.
Reason:
CRUD operations on tenant networks are only available when using nova-network
and nova-network is itself deprecated.
#enable_network_quota=false
DEPRECATED:
This option controls the number of private networks that can be created per
project (or per tenant).
Related options:
* enable_network_quota
(integer value)
Minimum value: 0
This option is deprecated for removal since 14.0.0.
Its value may be silently ignored in the future.
Reason:
CRUD operations on tenant networks are only available when using nova-network
and nova-network is itself deprecated.
#quota_networks=3
This option specifies the name of the availability zone for the
internal services. Services like nova-scheduler, nova-network,
nova-conductor are internal services. These services will appear in
their own internal availability_zone.
Possible values:
* Any string representing an availability zone name
* 'internal' is the default value
(string value)
#internal_service_availability_zone=internal
Default compute node availability_zone.
This option determines the availability zone to be used when it is not
specified in the VM creation request. If this option is not set,
the default availability zone 'nova' is used.
Possible values:
* Any string representing an availability zone name
* 'nova' is the default value
(string value)
#default_availability_zone=nova
Length of generated instance admin passwords. (integer value)
Minimum value: 0
#password_length=12
Time period to generate instance usages for. It is possible to define optional
offset to given period by appending @ character followed by a number defining
offset.
Possible values:
* period, example: ``hour``, ``day``, ``month`` or ``year``
* period with offset, example: ``month@15`` will result in monthly audits
  starting on 15th day of month.
(string value)
#instance_usage_audit_period=month
Start and use a daemon that can run the commands that need to be run with
root privileges. This option is usually enabled on nodes that run nova compute
processes.
(boolean value)
#use_rootwrap_daemon=false
Path to the rootwrap configuration file.
Goal of the root wrapper is to allow a service-specific unprivileged user to
run a number of actions as the root user in the safest manner possible.
The configuration file used here must match the one defined in the sudoers
entry.
(string value)
#rootwrap_config=/etc/nova/rootwrap.conf
Explicitly specify the temporary working directory. (string value)
#tempdir=
Determine if monkey patching should be applied.
Related options:
* ``monkey_patch_modules``: This must have values set for this option to
  have any effect
(boolean value)
#monkey_patch=false
List of modules/decorators to monkey patch.
This option allows you to patch a decorator for all functions in specified
modules.
Possible values:
* nova.compute.api:nova.notifications.notify_decorator
* nova.api.ec2.cloud:nova.notifications.notify_decorator
* [...]
Related options:
* ``monkey_patch``: This must be set to ``True`` for this option to
  have any effect
(list value)
#monkey_patch_modules=nova.compute.api:nova.notifications.notify_decorator
Defines which driver to use for controlling virtualization.
Possible values:
* libvirt.LibvirtDriver
* xenapi.XenAPIDriver
* fake.FakeDriver
* ironic.IronicDriver
* vmwareapi.VMwareVCDriver
* hyperv.HyperVDriver
(string value)
#compute_driver=
Allow destination machine to match source for resize. Useful when
testing in single-host environments. By default it is not allowed
to resize to the same host. Setting this option to true will add
the same host to the destination options.
(boolean value)
#allow_resize_to_same_host=false
Availability zone to use when user doesn't specify one.
This option is used by the scheduler to determine which availability
zone to place a new VM instance into if the user did not specify one
at the time of VM boot request.
Possible values:
* Any string representing an availability zone name
* Default value is None.
(string value)
#default_schedule_zone=
Image properties that should not be inherited from the instance
when taking a snapshot.
This option gives an opportunity to select which image-properties
should not be inherited by newly created snapshots.
Possible values:
* A list whose item is an image property. Usually only the image
properties that are only needed by base images can be included
here, since the snapshots that are created from the base images
doesn't need them.
* Default list: ['cache_in_nova', 'bittorrent']
(list value)
#non_inheritable_image_properties=cache_in_nova,bittorrent
DEPRECATED:
This option is used to decide when an image should have no external
ramdisk or kernel. By default this is set to 'nokernel', so when an
image is booted with the property 'kernel_id' with the value
'nokernel', Nova assumes the image doesn't require an external kernel
and ramdisk.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
When an image is booted with the property 'kernel_id' with the value
'nokernel', Nova assumes the image doesn't require an external kernel and
ramdisk. This option allows user to change the API behaviour which should not
be allowed and this value "nokernel" should be hard coded.
#null_kernel=nokernel
DEPRECATED:
When creating multiple instances with a single request using the
os-multiple-create API extension, this template will be used to build
the display name for each instance. The benefit is that the instances
end up with different hostnames. Example display names when creating
two VM's: name-1, name-2.
Possible values:
* Valid keys for the template are: name, uuid, count.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
This config changes API behaviour. All changes in API behaviour should be
discoverable.
#multi_instance_display_name_template=%(name)s-%(count)d
Maximum number of devices that will result in a local image being
created on the hypervisor node.
A negative number means unlimited. Setting max_local_block_devices
to 0 means that any request that attempts to create a local disk
will fail. This option is meant to limit the number of local discs
(so root local disc that is the result of --image being used, and
any other ephemeral and swap disks). 0 does not mean that images
will be automatically converted to volumes and boot instances from
volumes - it just means that all requests that attempt to create a
local disk will fail.
Possible values:
* 0: Creating a local disk is not allowed.
* Negative number: Allows unlimited number of local discs.
* Positive number: Allows only these many number of local discs.
(Default value is 3).
(integer value)
#max_local_block_devices=3
A list of monitors that can be used for getting compute metrics.
You can use the alias/name from the setuptools entry points for
nova.compute.monitors.* namespaces. If no namespace is supplied,
the "cpu." namespace is assumed for backwards-compatibility.
Possible values:
* An empty list will disable the feature(Default).
* An example value that would enable both the CPU and NUMA memory
bandwidth monitors that used the virt driver variant:
["cpu.virt_driver", "numa_mem_bw.virt_driver"]
(list value)
#compute_monitors =
The default format an ephemeral_volume will be formatted with on creation.
Possible values:
* ext2
* ext3
* ext4
* xfs
* ntfs (only for Windows guests)
(string value)
#default_ephemeral_format=
Determine if instance should boot or fail on VIF plugging timeout.
Nova sends a port update to Neutron after an instance has been scheduled,
providing Neutron with the necessary information to finish setup of the port.
Once completed, Neutron notifies Nova that it has finished setting up the
port, at which point Nova resumes the boot of the instance since network
connectivity is now supposed to be present. A timeout will occur if the reply
is not received after a given interval.
This option determines what Nova does when the VIF plugging timeout event
happens. When enabled, the instance will error out. When disabled, the
instance will continue to boot on the assumption that the port is ready.
Possible values:
* True: Instances should fail after VIF plugging timeout
* False: Instances should continue booting after VIF plugging timeout
(boolean value)
#vif_plugging_is_fatal=true
Timeout for Neutron VIF plugging event message arrival.
Number of seconds to wait for Neutron vif plugging events to
arrive before continuing or failing (see 'vif_plugging_is_fatal').
Related options:
* ``vif_plugging_is_fatal`` - If ``vif_plugging_timeout`` is set to zero and
  ``vif_plugging_is_fatal`` is False, events should not be expected to
  arrive at all.
(integer value)
Minimum value: 0
#vif_plugging_timeout=300
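The two options above work as a pair; spelling out the documented defaults, an instance errors out if the Neutron plugging event has not arrived within 300 seconds:

[DEFAULT]
vif_plugging_is_fatal = true
vif_plugging_timeout = 300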
Path to '/etc/network/interfaces' template.
The path to a template file for the '/etc/network/interfaces'-style file, which
will be populated by nova and subsequently used by cloudinit. This provides a
method to configure network connectivity in environments without a DHCP server.
The template will be rendered using Jinja2 template engine, and receive a
top-level key called ``interfaces``. This key will contain a list of
dictionaries, one for each interface.
Refer to the cloudinit documentation for more information:
https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
Possible values:
* A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces'
  file. This applies even if using a non Debian-derived guest.
Related options:
* ``flat_inject``: This must be set to ``True`` to ensure nova embeds network
  configuration information in the metadata provided through the config drive.
(string value)
#injected_network_template=$pybasedir/nova/virt/interfaces.template
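For orientation, a minimal sketch of such a template; the per-interface keys used here (name, address, netmask) are assumptions for illustration, not a guaranteed schema:

{# one /etc/network/interfaces stanza per interface dictionary #}
auto lo
iface lo inet loopback
{% for ifc in interfaces %}
auto {{ ifc.name }}
iface {{ ifc.name }} inet static
    address {{ ifc.address }}
    netmask {{ ifc.netmask }}
{% endfor %}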
The image preallocation mode to use.
Image preallocation allows storage for instance images to be allocated up front
when the instance is initially provisioned. This ensures immediate feedback is
given if enough space isn't available. In addition, it should significantly
improve performance on writes to new blocks and may even improve I/O
performance to prewritten blocks due to reduced fragmentation.
Possible values:
* "none" => no storage provisioning is done up front
* "space" => storage is fully allocated at instance start
(string value)
Allowed values: none, space
#preallocate_images=none
Enable use of copy-on-write (cow) images.
QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
backing files will not be used.
(boolean value)
#use_cow_images=true
Force conversion of backing images to raw format.
Possible values:
* True: Backing image files will be converted to raw image format
* False: Backing image files will not be converted
Related options:
* compute_driver
: Only the libvirt driver uses this option.
(boolean value)
#force_raw_images=true
Name of the mkfs commands for ephemeral device.
The format is <os_type>=<mkfs command>
(multi valued)
#virt_mkfs =
Enable resizing of filesystems via a block device.
If enabled, attempt to resize the filesystem by accessing the image over a
block device. This is done by the host and may not be necessary if the image
contains a recent version of cloud-init. Possible mechanisms require the nbd
driver (for qcow and raw), or loop (for raw).
(boolean value)
#resize_fs_using_block_device=false
Amount of time, in seconds, to wait for NBD device start up. (integer value)
Minimum value: 0
#timeout_nbd=10
Location of cached images.
This is NOT the full path - just a folder name relative to '$instances_path'.
For per-compute-host cached images, set to '_base_$my_ip'
(string value)
#image_cache_subdirectory_name=_base
Should unused base images be removed? (boolean value)
#remove_unused_base_images=true
Unused unresized base images younger than this will not be removed.
(integer value)
#remove_unused_original_minimum_age_seconds=86400
Generic property to specify the pointer type.
Input devices allow interaction with a graphical framebuffer. For
example to provide a graphic tablet for absolute cursor movement.
If set, the 'hw_pointer_model' image property takes precedence over
this configuration option.
Possible values:
* None: Uses default behavior provided by drivers (mouse on PS2 for
libvirt x86)
* ps2mouse: Uses relative movement. Mouse connected by PS2
* usbtablet: Uses absolute movement. Tablet connect by USB
Related options:
* usbtablet must be configured with VNC enabled or SPICE enabled and SPICE
agent disabled. When used with libvirt the instance mode should be
configured as HVM.
(string value)
Allowed values: <None>, ps2mouse, usbtablet
#pointer_model=usbtablet
Defines which physical CPUs (pCPUs) can be used by instance
virtual CPUs (vCPUs).
Possible values:
* A comma-separated list of physical CPU numbers that virtual CPUs can be
allocated to by default. Each element should be either a single CPU number,
a range of CPU numbers, or a caret followed by a CPU number to be
excluded from a previous range. For example:
vcpu_pin_set = "4-12,^8,15"
(string value)
#vcpu_pin_set=
Number of huge/large memory pages to reserve per NUMA host cell.
Possible values:
* A list of valid key=value which reflect NUMA node ID, page size
(Default unit is KiB) and number of pages to be reserved.
reserved_huge_pages = node:0,size:2048,count:64
reserved_huge_pages = node:1,size:1GB,count:1
In this example we are reserving 64 pages of 2MiB on NUMA node 0 and
one page of 1GiB on NUMA node 1.
(dict value)
#reserved_huge_pages=
Amount of disk resources in MB to make them always available to host. The
disk usage gets reported back to the scheduler from nova-compute running
on the compute nodes. To prevent the disk resources from being considered
as available, this option can be used to reserve disk space for that host.
Possible values:
* Any positive integer representing amount of disk in MB to reserve
for the host.
(integer value)
Minimum value: 0
#reserved_host_disk_mb=0
Amount of memory in MB to reserve for the host so that it is always available
to host processes. The host resources usage is reported back to the scheduler
continuously from nova-compute running on the compute node. To prevent the host
memory from being considered as available, this option is used to reserve
memory for the host.
Possible values:
* Any positive integer representing amount of memory in MB to reserve
for the host.
(integer value)
Minimum value: 0
#reserved_host_memory_mb=512
This option helps you specify virtual CPU to physical CPU allocation ratio.
From Ocata (15.0.0) this is used to influence the hosts selected by
the Placement API. Note that when Placement is used, the CoreFilter
is redundant, because the Placement API will have already filtered
out hosts that would have failed the CoreFilter.
This configuration specifies ratio for CoreFilter which can be set
per compute node. For AggregateCoreFilter, it will fall back to this
configuration value if no per-aggregate setting is found.
NOTE: This can be set per-compute, or if set to 0.0, the value
set on the scheduler node(s) or compute node(s) will be used
and defaulted to 16.0.
Possible values:
* Any valid positive integer or float value
(floating point value)
Minimum value: 0
#cpu_allocation_ratio=0.0
This option helps you specify virtual RAM to physical RAM
allocation ratio.
From Ocata (15.0.0) this is used to influence the hosts selected by
the Placement API. Note that when Placement is used, the RamFilter
is redundant, because the Placement API will have already filtered
out hosts that would have failed the RamFilter.
This configuration specifies ratio for RamFilter which can be set
per compute node. For AggregateRamFilter, it will fall back to this
configuration value if no per-aggregate setting is found.
NOTE: This can be set per-compute, or if set to 0.0, the value
set on the scheduler node(s) or compute node(s) will be used and
defaulted to 1.5.
Possible values:
* Any valid positive integer or float value
(floating point value)
Minimum value: 0
#ram_allocation_ratio=0.0
This option helps you specify virtual disk to physical disk
allocation ratio.
From Ocata (15.0.0) this is used to influence the hosts selected by
the Placement API. Note that when Placement is used, the DiskFilter
is redundant, because the Placement API will have already filtered
out hosts that would have failed the DiskFilter.
A ratio greater than 1.0 will result in over-subscription of the
available physical disk, which can be useful for more
efficiently packing instances created with images that do not
use the entire virtual disk, such as sparse or compressed
images. It can be set to a value between 0.0 and 1.0 in order
to preserve a percentage of the disk for uses other than
instances.
NOTE: This can be set per-compute, or if set to 0.0, the value
set on the scheduler node(s) or compute node(s) will be used and
defaulted to 1.0.
Possible values:
* Any valid positive integer or float value
(floating point value)
Minimum value: 0
#disk_allocation_ratio=0.0
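Reading the three descriptions together: leaving the ratios at 0.0 defers to the scheduler-side defaults (16.0, 1.5 and 1.0 respectively); pinning them explicitly on a compute node would look like this sketch, using the fallback values stated above:

[DEFAULT]
cpu_allocation_ratio = 16.0
ram_allocation_ratio = 1.5
disk_allocation_ratio = 1.0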
Console proxy host to be used to connect to instances on this host. It is the
publicly visible name for the console host.
Possible values:
* Current hostname (default) or any string representing hostname.
(string value)
#console_host=socket.gethostname()
Name of the network to be used to set access IPs for instances. If there are
multiple IPs to choose from, an arbitrary one will be chosen.
Possible values:
* None (default)
* Any string representing network name.
(string value)
#default_access_ip_network_name=
Whether to batch up the application of IPTables rules during a host restart
and apply all at the end of the init phase.
(boolean value)
#defer_iptables_apply=false
Specifies where instances are stored on the hypervisor's disk.
It can point to locally attached storage or a directory on NFS.
Possible values:
* $state_path/instances where state_path is a config option that specifies
the top-level directory for maintaining nova's state. (default) or
Any string representing directory path.
(string value)
#instances_path=$state_path/instances
This option enables periodic compute.instance.exists notifications. Each
compute node must be configured to generate system usage data. These
notifications are consumed by OpenStack Telemetry service.
(boolean value)
#instance_usage_audit=false
Maximum number of 1 second retries in live_migration. It specifies the number
of retries to iptables when it complains. It happens when a user continuously
sends live-migration requests to the same host, leading to concurrent requests
to iptables.
Possible values:
* Any positive integer representing retry count.
(integer value)
Minimum value: 0
#live_migration_retry_count=30
This option specifies whether to start guests that were running before the
host rebooted. It ensures that all of the instances on a Nova compute node
resume their state each time the compute node boots or restarts.
(boolean value)
#resume_guests_state_on_host_boot=false
Number of times to retry network allocation. It is required to attempt network
allocation retries if the virtual interface plug fails.
Possible values:
* Any positive integer representing retry count.
(integer value)
Minimum value: 0
#network_allocate_retries=0
Limits the maximum number of instance builds to run concurrently by
nova-compute. Compute service can attempt to build an infinite number of
instances, if asked to do so. This limit is enforced to avoid building
unlimited instances concurrently on a compute node. This value can be set
per compute node.
Possible Values:
* 0 : treated as unlimited.
* Any positive integer representing maximum concurrent builds.
(integer value)
Minimum value: 0
#max_concurrent_builds=10
Maximum number of live migrations to run concurrently. This limit is enforced
to avoid outbound live migrations overwhelming the host/network and causing
failures. It is not recommended that you change this unless you are very sure
that doing so is safe and stable in your environment.
Possible values:
* 0 : treated as unlimited.
* Negative value defaults to 0.
* Any positive integer representing maximum number of live migrations
to run concurrently.
(integer value)
#max_concurrent_live_migrations=1
Number of times to retry block device allocation on failures. Starting with
Liberty, Cinder can use image volume cache. This may help with block device
allocation performance. Look at the cinder image_volume_cache_enabled
configuration option.
Possible values:
* 60 (default)
* If value is 0, then one attempt is made.
* Any negative value is treated as 0.
* For any value > 0, total attempts are (value + 1)
(integer value)
#block_device_allocate_retries=60
Number of greenthreads available for use to sync power states.
This option can be used to reduce the number of concurrent requests
made to the hypervisor or system with real instance power states
for performance reasons, for example, with Ironic.
Possible values:
* Any positive integer representing greenthreads count.
(integer value)
#sync_power_state_pool_size=1000
Number of seconds to wait between runs of the image cache manager.
Possible values:
* 0: run at the default rate.
* -1: disable
* Any other value
(integer value)
Minimum value: -1
#image_cache_manager_interval=2400
Interval to pull network bandwidth usage info.
Not supported on all hypervisors. If a hypervisor doesn't support bandwidth
usage, it will not get the info in the usage events.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
(integer value)
#bandwidth_poll_interval=600
Interval to sync power states between the database and the hypervisor.
The interval that Nova checks the actual virtual machine power state
and the power state that Nova has in its database. If a user powers
down their VM, Nova updates the API to report the VM has been
powered down. Should something turn on the VM unexpectedly,
Nova will turn the VM back off to keep the system in the expected
state.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* If ``handle_virt_lifecycle_events`` in workarounds_group is
  false and this option is negative, then instances that get out
  of sync between the hypervisor and the Nova database will have
  to be synchronized manually.
(integer value)
#sync_power_state_interval=600
Interval between instance network information cache updates.
Number of seconds after which each compute node runs the task of
querying Neutron for all of its instances networking information,
then updates the Nova db with that information. Nova will never
update its cache if this option is set to 0. If we don't update the
cache, the metadata service and nova-api endpoints will be proxying
incorrect network data about the instance. So, it is not recommended
to set this option to 0.
Possible values:
* Any positive integer in seconds.
* Any value <=0 will disable the sync. This is not recommended.
(integer value)
#heal_instance_info_cache_interval=60
Interval for reclaiming deleted instances.
A value greater than 0 will enable SOFT_DELETE of instances.
This option decides whether the server to be deleted will be put into
the SOFT_DELETED state. If this value is greater than 0, the deleted
server will not be deleted immediately, instead it will be put into
a queue until it's too old (deleted time greater than the value of
reclaim_instance_interval). The server can be recovered from the
delete queue by using the restore action. If the deleted server remains
longer than the value of reclaim_instance_interval, it will be
deleted by a periodic task in the compute service automatically.
Note that this option is read from both the API and compute nodes, and
must be set globally otherwise servers could be put into a soft deleted
state in the API and never actually reclaimed (deleted) on the compute
node.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
this option.
* Any value <=0 will disable the option.
(integer value)
#reclaim_instance_interval=0
Interval for gathering volume usages.
This option updates the volume usage cache for every
volume_usage_poll_interval number of seconds.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
this option.
* Any value <=0 will disable the option.
(integer value)
#volume_usage_poll_interval=0
Interval for polling shelved instances to offload.
The periodic task runs for every shelved_poll_interval number
of seconds and checks if there are any shelved instances. If it
finds a shelved instance, based on the 'shelved_offload_time' config
value it offloads the shelved instances. Check 'shelved_offload_time'
config option description for details.
Possible values:
* Any value <= 0: Disables the option.
* Any positive integer in seconds.
Related options:
* shelved_offload_time
(integer value)
#shelved_poll_interval=3600
Time before a shelved instance is eligible for removal from a host.
By default this option is set to 0 and the shelved instance will be
removed from the hypervisor immediately after shelve operation.
Otherwise, the instance will be kept for the value of
shelved_offload_time(in seconds) so that during the time period the
unshelve action will be faster, then the periodic task will remove
the instance from hypervisor after shelved_offload_time passes.
Possible values:
* 0: Instance will be immediately offloaded after being
shelved.
* Any value < 0: An instance will never offload.
* Any positive integer in seconds: The instance will exist for
the specified number of seconds before being offloaded.
(integer value)
#shelved_offload_time=0
Interval for retrying failed instance file deletes.
This option depends on 'maximum_instance_delete_attempts'.
This option specifies how often to retry deletes whereas
'maximum_instance_delete_attempts' specifies the maximum number
of retry attempts that can be made.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* ``maximum_instance_delete_attempts`` from instance_cleaning_opts group.
(integer value)
#instance_delete_interval=300
Interval (in seconds) between block device allocation retries on failures.
This option allows the user to specify the time interval between
consecutive retries. 'block_device_allocate_retries' option specifies
the maximum number of retries.
Possible values:
* 0: Disables the option.
* Any positive integer in seconds enables the option.
Related options:
* ``block_device_allocate_retries`` in compute_manager_opts group.
(integer value)
Minimum value: 0
#block_device_allocate_retries_interval=3
Interval between sending the scheduler a list of current instance UUIDs to
verify that its view of instances is in sync with nova.
If the CONF option 'scheduler_tracks_instance_changes' is
False, the sync calls will not be made. So, changing this option will
have no effect.
If the out of sync situations are not very common, this interval
can be increased to lower the number of RPC messages being sent.
Likewise, if sync issues turn out to be a problem, the interval
can be lowered to check more frequently.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* This option has no impact if scheduler_tracks_instance_changes
is set to False.
(integer value)
#scheduler_instance_sync_interval=120
Interval for updating compute resources.
This option specifies how often the update_available_resources
periodic task should run. A number less than 0 means to disable the
task completely. Leaving this at the default of 0 will cause this to
run at the default periodic interval. Setting it to any positive
value will cause it to run at approximately that number of seconds.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
(integer value)
#update_resources_interval=0
Time interval after which an instance is hard rebooted automatically.
When doing a soft reboot, it is possible that a guest kernel is
completely hung in a way that causes the soft reboot task
to not ever finish. Setting this option to a time period in seconds
will automatically hard reboot an instance if it has been stuck
in a rebooting state longer than N seconds.
Possible values:
* 0: Disables the option (default).
* Any positive integer in seconds: Enables the option.
(integer value)
Minimum value: 0
#reboot_timeout=0
Maximum time in seconds that an instance can take to build.
If this timer expires, instance status will be changed to ERROR.
Enabling this option will make sure an instance will not be stuck
in BUILD state for a longer period.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
(integer value)
Minimum value: 0
#instance_build_timeout=0
Interval to wait before un-rescuing an instance stuck in RESCUE.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
(integer value)
Minimum value: 0
#rescue_timeout=0
Automatically confirm resizes after N seconds.
Resize functionality will save the existing server before resizing.
After the resize completes, user is requested to confirm the resize.
The user has the opportunity to either confirm or revert all
changes. Confirm resize removes the original server and changes
server status from resized to active. Setting this option to a time
period (in seconds) will automatically confirm the resize if the
server is in resized state longer than that time.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
(integer value)
Minimum value: 0
#resize_confirm_window=0
Total time to wait in seconds for an instance to perform a clean
shutdown.
It determines the overall period (in seconds) a VM is allowed to
perform a clean shutdown. While performing stop, rescue and shelve,
rebuild operations, configuring this option gives the VM a chance
to perform a controlled shutdown before the instance is powered off.
The default timeout is 60 seconds.
The timeout value can be overridden on a per image basis by means
of os_shutdown_timeout that is an image metadata setting allowing
different types of operating systems to specify how much time they
need to shut down cleanly.
Possible values:
* Any positive integer in seconds (default value is 60).
(integer value)
Minimum value: 1
#shutdown_timeout=60
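The per-image override mentioned above is set through image metadata; a sketch using the OpenStack client, where the image name and the 180-second value are illustrative:

root@controller:~# openstack image set --property os_shutdown_timeout=180 my-image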
The compute service periodically checks for instances that have been
deleted in the database but remain running on the compute node. The
above option enables action to be taken when such instances are
identified.
Possible values:
* reap: Powers down the instances and deletes them (default)
* log: Logs warning message about deletion of the resource
* shutdown: Powers down instances and marks them as non-bootable,
  which can be later used for debugging/analysis
* noop: Takes no action
Related options:
* running_deleted_instance_poll
* running_deleted_instance_timeout
(string value)
Allowed values: noop, log, shutdown, reap
#running_deleted_instance_action=reap
Time interval in seconds to wait between runs for the clean up action.
If set to 0, above check will be disabled. If "running_deleted_instance
_action" is set to "log" or "reap", a value greater than 0 must be set.
Possible values:
* Any positive integer in seconds enables the option.
* 0: Disables the option.
* 1800: Default value.
Related options:
* running_deleted_instance_action
(integer value)
#running_deleted_instance_poll_interval=1800
Time interval in seconds to wait for the instances that have
been marked as deleted in database to be eligible for cleanup.
Possible values:
* Any positive integer in seconds (default is 0).
Related options:
* "running_deleted_instance_action"
(integer value)
#running_deleted_instance_timeout=0
The number of times to attempt to reap an instance's files.
This option specifies the maximum number of retry attempts
that can be made.
Possible values:
* Any positive integer defines how many attempts are made.
* Any value <=0 means no delete attempts occur, but you should use
  ``instance_delete_interval`` to disable the delete attempts.
Related options:
* ``instance_delete_interval`` in interval_opts group can be used to disable
  this option.
(integer value)
#maximum_instance_delete_attempts=5
DEPRECATED:
This is the message queue topic that the compute service 'listens' on. It is
used when the compute service is started up to configure the queue, and
whenever an RPC call to the compute service is made.
Possible values:
* Any string, but there is almost never any reason to ever change this value
from its default of 'compute'.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
There is no need to let users choose the RPC topic for all services - there
is little gain from this. Furthermore, it makes it really easy to break Nova
by using this option.
#compute_topic=compute
Sets the scope of the check for unique instance names.
The default doesn't check for unique names. If a scope for the name check is
set, a launch of a new instance or an update of an existing instance with a
duplicate name will result in an 'InstanceExists' error. The uniqueness is
case-insensitive. Setting this option can increase the usability for end
users as they don't have to distinguish among instances with the same name
by their IDs.
Possible values:
* '': An empty value means that no uniqueness check is done and duplicate
names are possible.
* "project": The instance name check is done only for instances within the
same project.
* "global": The instance name check is done for all instances regardless of
the project.
(string value)
Allowed values: '', project, global
#osapi_compute_unique_server_name_scope =
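For example, to reject duplicate server names within a project (one of the allowed values listed above):

[DEFAULT]
osapi_compute_unique_server_name_scope = project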
Enable new services on this host automatically.
When a new service (for example "nova-compute") starts up, it gets
registered in the database as an enabled service. Sometimes it can be useful
to register new services in disabled state and then enabled them at a later
point in time. This option can set this behavior for all services per host.
Possible values:
* True: Each new service is enabled as soon as it registers itself.
* False: Services must be enabled via a REST API call or with the CLI
  with ``nova service-enable <hostname> <binary>``, otherwise they are not
  ready to use.
(boolean value)
#enable_new_services=true
Template string to be used to generate instance names.
This template controls the creation of the database name of an instance. This
is not the display name you enter when creating an instance (via Horizon
or CLI). For a new deployment it is advisable to change the default value
(which uses the database autoincrement) to another value which makes use
of the attributes of an instance, like ``instance-%(uuid)s``. If you
already have instances in your deployment when you change this, your
deployment will break.
Possible values:
* A string which either uses the instance database ID (like the
  default)
* A string with a list of named database columns, for example ``%(id)d``
  or ``%(uuid)s`` or ``%(hostname)s``.
Related options:
* not to be confused with: multi_instance_display_name_template
(string value)
#instance_name_template=instance-%08x
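Following the advice above, a fresh deployment might switch from the autoincrement-based default to the UUID form described in the option text:

[DEFAULT]
instance_name_template = instance-%(uuid)s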
Number of times to retry live-migration before failing.
Possible values:
* If == -1, try until out of hosts (default)
* If == 0, only try once, no retries
* Integer greater than 0
(integer value)
Minimum value: -1
#migrate_max_retries=-1
Configuration drive format
Configuration drive format that will contain metadata attached to the
instance when it boots.
Possible values:
* iso9660: A file system image standard that is widely supported across
operating systems. NOTE: Mind the libvirt bug
(https://bugs.launchpad.net/nova/+bug/1246201) - If your hypervisor
driver is libvirt, and you want live migrate to work without shared storage,
then use VFAT.
* vfat: For legacy reasons, you can configure the configuration drive to
use VFAT format instead of ISO 9660.
Related options:
* This option is meaningful when one of the following alternatives occur:
1. force_config_drive option set to 'true'
2. the REST API call to create the instance contains an enable flag for
config drive option
3. the image used to create the instance requires a config drive,
this is defined by img_config_drive property for that image.
* A compute node running Hyper-V hypervisor can be configured to attach
configuration drive as a CD drive. To attach the configuration drive as a CD
drive, set config_drive_cdrom option at hyperv section, to true.
(string value)
Allowed values: iso9660, vfat
#config_drive_format=iso9660
Force injection to take place on a config drive
When this option is set to true configuration drive functionality will be
forced enabled by default, otherwise user can still enable configuration
drives via the REST API or image metadata properties.
Possible values:
* True: Force the use of a configuration drive regardless of the user's input
  in the REST API call.
* False: Do not force use of configuration drive. Config drives can still be
enabled via the REST API or image metadata properties.
Related options:
* Use the 'mkisofs_cmd' flag to set the path where you install the
genisoimage program. If genisoimage is in the same path as the
nova-compute service, you do not need to set this flag.
* To use configuration drive with Hyper-V, you must set the
'mkisofs_cmd' value to the full path to an mkisofs.exe installation.
Additionally, you must set the qemu_img_cmd value in the hyperv
configuration section to the full path to a qemu-img command
installation.
(boolean value)
#force_config_drive=false
Name or path of the tool used for ISO image creation
Use the mkisofs_cmd flag to set the path where you install the genisoimage
program. If genisoimage is on the system path, you do not need to change
the default value.
To use configuration drive with Hyper-V, you must set the mkisofs_cmd value
to the full path to an mkisofs.exe installation. Additionally, you must set
the qemu_img_cmd value in the hyperv configuration section to the full path
to a qemu-img command installation.
Possible values:
* Name of the ISO image creator program, in case it is in the same directory
as the nova-compute service
* Path to ISO image creator program
Related options:
* This option is meaningful when config drives are enabled.
* To use configuration drive with Hyper-V, you must set the qemu_img_cmd
value in the hyperv configuration section to the full path to a qemu-img
command installation.
(string value)
#mkisofs_cmd=genisoimage
DEPRECATED:
nova-console-proxy is used to set up multi-tenant VM console access.
This option allows pluggable driver program for the console session
and represents driver to use for the console proxy.
Possible values:
* A string representing the fully qualified class name of the console driver.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
This option no longer does anything. Previously this option had only two valid,
in-tree values: nova.console.xvp.XVPConsoleProxy and
nova.console.fake.FakeConsoleProxy. The latter of these was only used in tests
and has since been replaced.
#console_driver=nova.console.xvp.XVPConsoleProxy
DEPRECATED:
Represents the message queue topic name used by nova-console
service when communicating via the AMQP server. The Nova API uses a message
queue to communicate with nova-console to retrieve a console URL for that
host.
Possible values:
* A string representing topic exchange name
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
There is no need to let users choose the RPC topic for all services - there
is little gain from this. Furthermore, it makes it really easy to break Nova
by using this option.
#console_topic=console
DEPRECATED:
This option allows you to change the message topic used by nova-consoleauth
service when communicating via the AMQP server. Nova Console Authentication
server authenticates nova consoles. Users can then access their instances
through VNC clients. The Nova API service uses a message queue to
communicate with nova-consoleauth to get a VNC console.
Possible values:
* 'consoleauth' (default) or Any string representing topic exchange name.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
There is no need to let users choose the RPC topic for all services - there
is little gain from this. Furthermore, it makes it really easy to break Nova
by using this option.
#consoleauth_topic=consoleauth
DEPRECATED: The driver to use for database access (string value)
This option is deprecated for removal since 13.0.0.
Its value may be silently ignored in the future.
#db_driver=nova.db
DEPRECATED:
Default flavor to use for the EC2 API only.
The Nova API does not support a default flavor.
(string value)
This option is deprecated for removal since 14.0.0.
Its value may be silently ignored in the future.
Reason: The EC2 API is deprecated.
#default_flavor=m1.small
Default pool for floating IPs.
This option specifies the default floating IP pool for allocating floating
IPs. When allocating a floating IP, users can optionally pass in the name of
the pool they want to allocate from; otherwise it will be pulled from the
default pool. If this option is not set, then 'nova' is used as the default
floating pool.
Possible values:
* Any string representing a floating IP pool name
(string value)
#default_floating_pool=nova
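A minimal sketch, assuming a floating IP pool named 'public' has already been
created for nova-network (the pool name is illustrative):

    [DEFAULT]
    # Floating IPs are drawn from this pool when the user does not name one
    default_floating_pool=public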
DEPRECATED:
Auto-assign a floating IP to the VM.
When set to True, a floating IP is automatically allocated and associated
with the VM upon creation.
Related options:
* use_neutron: this option only works with nova-network.
(boolean value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#auto_assign_floating_ip=false
DEPRECATED:
Full class name for the DNS Manager for floating IPs.
This option specifies the class of the driver that provides functionality
to manage DNS entries associated with floating IPs.
When a user adds a DNS entry for a specified domain to a floating IP,
nova will add a DNS entry using the specified floating DNS driver.
When a floating IP is deallocated, its DNS entry will automatically be
deleted.
Possible values:
* Full Python path to the class to be used
Related options:
* use_neutron: this option only works with nova-network.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
DEPRECATED:
Full class name for the DNS Manager for instance IPs.
This option specifies the class of the driver that provides functionality
to manage DNS entries for instances.
On instance creation, nova will add DNS entries for the instance name and
id, using the specified instance DNS driver and domain. On instance deletion,
nova will remove the DNS entries.
Possible values:
* Full Python path to the class to be used
Related options:
* use_neutron: this option only works with nova-network.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
DEPRECATED:
If specified, Nova checks if the availability_zone of every instance matches
what the database says the availability_zone should be for the specified
dns_domain.
Related options:
* use_neutron: this option only works with nova-network.
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#instance_dns_domain =
Abstracts out IPv6 address generation to pluggable backends.
nova-network can be put into dual-stack mode, so that it uses
both IPv4 and IPv6 addresses. In dual-stack mode, by default, instances
acquire IPv6 global unicast addresses with the help of the stateless address
autoconfiguration mechanism.
Related options:
* use_neutron: this option only works with nova-network.
* use_ipv6: this option only works if IPv6 is enabled for nova-network.
(string value)
Allowed values: rfc2462, account_identifier
#ipv6_backend=rfc2462
The IP address which the host is using to connect to the management network.
Possible values:
* String with valid IP address. Default is IPv4 address of this host.
Related options:
* metadata_host
* my_block_storage_ip
* routing_source_ip
* vpn_ip
(string value)
#my_ip=10.89.115.220
my_ip = 10.10.10.52
The IP address which is used to connect to the block storage network.
Possible values:
* String with valid IP address. Default is IP address of this host.
Related options:
* my_ip - if my_block_storage_ip is not set, then my_ip value is used.
(string value)
#my_block_storage_ip=$my_ip
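For instance, a sketch of a host with separate management and storage
networks; the management address is the one set earlier in this file, while
the storage address is illustrative:

    [DEFAULT]
    # Management network address of this host
    my_ip=10.10.10.52
    # Dedicated block storage network address; defaults to $my_ip when unset
    my_block_storage_ip=10.20.20.52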
Hostname, FQDN or IP address of this host. Must be valid within AMQP key.
Possible values:
* String with hostname, FQDN or IP address. Default is hostname of this host.
(string value)
#host=lcy01-23
Assign IPv6 and IPv4 addresses when creating instances.
Related options:
* use_neutron: this only works with nova-network.
(boolean value)
#use_ipv6=false
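Tying this to the ipv6_backend option above, a sketch of the dual-stack
nova-network setup described there (both values are the documented ones):

    [DEFAULT]
    # Hand out both IPv4 and IPv6 addresses to instances
    use_ipv6=true
    # IPv6 addresses come from stateless address autoconfiguration (RFC 2462)
    ipv6_backend=rfc2462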
This option is a list of full paths to one or more configuration files for
dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf'
should be sufficient, but if you have special needs for configuring dhcpbridge,
you can change or add to this list.
Possible values:
A list of strings, where each string is the full path to a dhcpbridge
configuration file.
(multi valued)
dhcpbridge_flagfile=/etc/nova/nova.conf
The location where the network configuration files will be kept. The default is
the 'networks' directory off of the location where nova's Python module is
installed.
Possible values:
A string containing the full path to the desired configuration directory
(string value)
#networks_path=$state_path/networks
This is the name of the network interface for public IP addresses. The default
is 'eth0'.
Possible values:
Any string representing a network interface name
(string value)
#public_interface=eth0
The location of the binary nova-dhcpbridge. By default it is the binary named
'nova-dhcpbridge' that is installed with all the other nova binaries.
Possible values:
Any string representing the full path to the binary for dhcpbridge
(string value)
dhcpbridge=/usr/bin/nova-dhcpbridge
This is the public IP address of the network host. It is used when creating a
SNAT rule.
Possible values:
Any valid IP address
Related options:
force_snat_range
(string value)
#routing_source_ip=$my_ip
The lifetime of a DHCP lease, in seconds. The default is 86400 (one day).
Possible values:
Any positive integer value.
(integer value)
Minimum value: 1
#dhcp_lease_time=86400
Despite the singular form of the name of this option, it is actually a list of
zero or more server addresses that dnsmasq will use for DNS nameservers. If
this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use
the servers specified in this option. If the option use_network_dns_servers is
True, the dns1 and dns2 servers from the network will be appended to this
list, and will be used as DNS servers, too.
Possible values:
A list of strings, where each string is either an IP address or an FQDN.
Related options:
use_network_dns_servers
(multi valued)
#dns_server =
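Because dns_server is a multi valued option, it is repeated once per
nameserver; a sketch using well-known public resolvers:

    [DEFAULT]
    # dnsmasq will use these servers instead of reading /etc/resolv.conf
    dns_server=8.8.8.8
    dns_server=8.8.4.4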
When this option is set to True, the dns1 and dns2 servers for the network
specified by the user on boot will be used for DNS, as well as any specified
in the dns_server option.
Related options:
dns_server
(boolean value)
#use_network_dns_servers=false
This option is a list of zero or more IP address ranges in your network's DMZ
that should be accepted.
Possible values:
A list of strings, each of which should be a valid CIDR.
(list value)
#dmz_cidr =
This is a list of zero or more IP ranges that traffic from the
routing_source_ip will be SNATted to. If the list is empty, then no SNAT
rules are created.
Possible values:
A list of strings, each of which should be a valid CIDR.
Related options:
routing_source_ip
(multi valued)
#force_snat_range =
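A sketch tying routing_source_ip to an SNAT range; the CIDR is illustrative:

    [DEFAULT]
    # SNAT rules are created for traffic from this illustrative range,
    # rewritten to the routing source IP (here left at its default)
    routing_source_ip=$my_ip
    force_snat_range=192.168.100.0/24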
The path to the custom dnsmasq configuration file, if any.
Possible values:
The full path to the configuration file, or an empty string if there is no
custom dnsmasq configuration file.
(string value)
#dnsmasq_config_file =
This is the class used as the ethernet device driver for linuxnet bridge
operations. The default value should be all you need for most cases, but if you
wish to use a customized class, set this option to the full dot-separated
import path for that class.
Possible values:
Any string representing a dot-separated class path that Nova can import.
(string value)
#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
The name of the Open vSwitch bridge that is used with linuxnet when connecting
with Open vSwitch.
Possible values:
Any string representing a valid bridge name.
(string value)
#linuxnet_ovs_integration_bridge=br-int
When True, when a device starts up, and upon binding floating IP addresses,
arp messages will be sent to ensure that the arp caches on the compute hosts
are up-to-date.
Related options:
send_arp_for_ha_count
(boolean value)
#send_arp_for_ha=false
When arp messages are configured to be sent, they will be sent with the count
set to the value of this option. Of course, if this is set to zero, no arp
messages will be sent.
Possible values:
Any integer greater than or equal to 0
Related options:
send_arp_for_ha
(integer value)
#send_arp_for_ha_count=3
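A sketch enabling the gratuitous-arp behaviour with the default repeat count:

    [DEFAULT]
    # Send arp updates when devices start or floating IPs are bound
    send_arp_for_ha=true
    # Each arp update is repeated this many times (0 disables sending)
    send_arp_for_ha_count=3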
When set to True, only the first NIC of a VM will get its default gateway from
the DHCP server.
(boolean value)
#use_single_default_gateway=false
One or more interfaces that bridges can forward traffic to. If any of the items
in this list is the special keyword 'all', then all traffic will be forwarded.
Possible values:
A list of zero or more interface names, or the word 'all'.
(multi valued)
#forward_bridge_interface=all
This option determines the IP address for the network metadata API server.
Possible values:
* Any valid IP address. The default is the address of the Nova API server.
Related options:
* metadata_port
(string value)
#metadata_host=$my_ip
This option determines the port used for the metadata API server.
Related options:
* metadata_host
(port value)
Minimum value: 0
Maximum value: 65535
#metadata_port=8775
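For example, pointing instances at a dedicated metadata API server; the host
address here is illustrative (by default metadata_host follows $my_ip):

    [DEFAULT]
    # Address and port of the metadata API server
    metadata_host=10.10.10.51
    metadata_port=8775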
This expression, if defined, will select any matching iptables rules and place
them at the top when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_bottom_regex
(string value)
#iptables_top_regex =
This expression, if defined, will select any matching iptables rules and place
them at the bottom when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_top_regex
(string value)
#iptables_bottom_regex =
By default, packets that do not pass the firewall are DROPped. In many cases,
though, an operator may find it more useful to change this from DROP to REJECT,
so that the user issuing those packets may have a better idea as to what's
going on, or LOGDROP in order to record the blocked traffic before DROPping.
Possible values:
* A string representing an iptables chain. The default is DROP.
(string value)
#iptables_drop_action=DROP
This option represents the period of time, in seconds, that the ovs_vsctl calls
will wait for a response from the database before timing out. A setting of 0
means that the utility should wait forever for a response.
Possible values:
* Any positive integer if a limited timeout is desired, or zero if the
calls should wait forever for a response.
(integer value)
Minimum value: 0
#ovs_vsctl_timeout=120
This option is used mainly in testing to avoid calls to the underlying network
utilities.
(boolean value)
#fake_network=false
This option determines the number of times to retry ebtables commands before
giving up. The minimum number of retries is 1.
Possible values:
* Any positive integer
Related options:
* ebtables_retry_interval
(integer value)
Minimum value: 1
#ebtables_exec_attempts=3
This option determines the time, in seconds, that the system will sleep in
between ebtables retries. Note that each successive retry waits a multiple of
this value, so for example, if this is set to the default of 1.0 seconds, and
ebtables_exec_attempts is 4, after the first failure, the system will sleep for
1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds, and
after the third failure it will sleep 3 * 1.0 seconds.
Possible values:
* Any non-negative float or integer. Setting this to zero will result in no
waiting between attempts.
Related options:
* ebtables_exec_attempts
(floating point value)
#ebtables_retry_interval=1.0
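A sketch of the retry/backoff pair; following the linear backoff described
above, with these illustrative values the sleeps after the first three
failures are 1 * 0.5 = 0.5s, 2 * 0.5 = 1.0s, and 3 * 0.5 = 1.5s:

    [DEFAULT]
    # Try each ebtables command up to 4 times
    ebtables_exec_attempts=4
    # Linear backoff base: sleep n * 0.5 seconds after the n-th failure
    ebtables_retry_interval=0.5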
This option determines whether the network setup information is injected into
the VM before it is booted. While it was originally designed to be used only by
nova-network, it is also used by the vmware and xenapi virt drivers to control
whether network information is injected into a VM.
(boolean value)
#flat_injected=false
DEPRECATED:
This option determines the bridge used for simple network interfaces when no
bridge is specified in the VM creation request.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any string representing a valid network bridge, such as 'br100'
Related options:
use_neutron
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#flat_network_bridge=
DEPRECATED:
This is the address of the DNS server for a simple network. If this option is
not specified, the default of '8.8.4.4' is used.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
use_neutron
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#flat_network_dns=8.8.4.4
DEPRECATED:
This option is the name of the virtual interface of the VM on which the bridge
will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt for the bridge interface name.
Possible values:
Any valid virtual interface name, such as 'eth0'
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#flat_interface=
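Pulling the three flat-network options together, a sketch of a simple
nova-network flat setup; the bridge and interface names are illustrative:

    [DEFAULT]
    # Bridge used when the VM creation request names none
    flat_network_bridge=br100
    # Physical interface the bridge is built on
    flat_interface=eth0
    # DNS server handed to instances on the flat network
    flat_network_dns=8.8.4.4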
DEPRECATED:
This is the VLAN number used for private networks. Note that when creating
the networks, if the specified number has already been assigned, nova-network
will increment this number until it finds an available VLAN.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration
option for network_manager is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any integer between 1 and 4094. Values outside of that range will raise a
ValueError exception. Default = 100.
Related options:
network_manager, use_neutron
(integer value)
Minimum value: 1
Maximum value: 4094
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#vlan_start=100
DEPRECATED:
This option is the name of the virtual interface of the VM on which the VLAN
bridge will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt and xenapi for the bridge interface
name.
Please note that this setting will be ignored in nova-network if the
configuration option for network_manager is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any valid virtual interface name, such as 'eth0'
(string value)
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options. While
this option has an effect when using neutron, it incorrectly overrides the
value provided by neutron and should therefore not be used.
#vlan_interface=
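A sketch of the VLAN manager setup these two options belong to; the interface
name is illustrative, and vlan_start must stay within 1-4094:

    [DEFAULT]
    # The documented default manager; VLAN options are ignored otherwise
    network_manager=nova.network.manager.VlanManager
    # First VLAN id to try; nova-network increments past assigned ids
    vlan_start=100
    # Physical interface the VLAN bridges are built on
    vlan_interface=eth1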
DEPRECATED:
This option represents the number of networks to create if not explicitly
specified when the network is created. The only time this is used is if a CIDR
is specified, but an explicit network_size is not. In that case, the subnets
are created by dividing the IP address space of the CIDR by num_networks. The
resulting subnet sizes cannot be larger than the configuration option
network_size; in that event, they are reduced to network_size, and a warning
is logged.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any positive integer is technically valid, although there are practical
limits based upon available IP address space and virtual interfaces. The
default is 1.
Related options:
use_neutron, network_size
(integer value)
Minimum value: 1
This option is deprecated for removal since 15.0.0.
Its value may be silently ignored in the future.
Reason:
nova-network is deprecated, as are any related configuration options.
#num_networks=1
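A worked sketch of the division described above: a /24 CIDR holds 256
addresses, so num_networks=4 yields four 64-address (/26) subnets, which fits
within a network_size of 64 (all values illustrative):

    [DEFAULT]
    # e.g. 10.0.0.0/24 -> 10.0.0.0/26, 10.0.0.64/26, 10.0.0.128/26, ...
    num_networks=4
    network_size=64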
MariaDB [nova]> select * from compute_nodes\G
*************************** 1. row ***************************
created_at: 2017-02-27 21:23:51
updated_at: 2017-02-28 15:44:49
deleted_at: NULL
id: 1
service_id: NULL
vcpus: 1
memory_mb: 7983
local_gb: 5
vcpus_used: 0
memory_mb_used: 359
local_gb_used: 2
hypervisor_type: QEMU
hypervisor_version: 2008000
cpu_info: {"vendor": "Intel", "model": "Westmere", "arch": "x86_64", "features": ["pge", "avx", "clflush", "sep", "syscall", "vme", "tsc", "xsave", "cmov", "fpu", "clflushopt", "pat", "monitor", "lm", "msr", "3dnowprefetch", "nx", "fxsr", "sse4.1", "pae", "sse4.2", "pclmuldq", "mmx", "osxsave", "cx8", "mce", "de", "rdtscp", "mca", "pse", "pni", "abm", "rdseed", "popcnt", "apic", "sse", "invtsc", "lahf_lm", "aes", "sse2", "hypervisor", "ssse3", "cx16", "pse36", "mtrr", "movbe", "rdrand", "x2apic"], "topology": {"cores": 1, "cells": 1, "threads": 1, "sockets": 1}}
disk_available_least: 2
free_ram_mb: 7471
free_disk_gb: 5
current_workload: 0
running_vms: 0
hypervisor_hostname: compute
deleted: 0
host_ip: 10.10.10.52
supported_instances: [["i686", "qemu", "hvm"], ["x86_64", "qemu", "hvm"]]
pci_stats: {"nova_object.version": "1.1", "nova_object.changes": ["objects"], "nova_object.name": "PciDevicePoolList", "nova_object.data": {"objects": []}, "nova_object.namespace": "nova"}
metrics: []
extra_resources: NULL
stats: {}
numa_topology: {"nova_object.version": "1.2", "nova_object.changes": ["cells"], "nova_object.name": "NUMATopology", "nova_object.data": {"cells": [{"nova_object.version": "1.2", "nova_object.changes": ["cpu_usage", "memory_usage", "cpuset", "mempages", "pinned_cpus", "memory", "siblings", "id"], "nova_object.name": "NUMACell", "nova_object.data": {"cpu_usage": 0, "memory_usage": 0, "cpuset": [0], "pinned_cpus": [], "siblings": [], "memory": 7983, "mempages": [{"nova_object.version": "1.1", "nova_object.changes": ["total", "used", "reserved", "size_kb"], "nova_object.name": "NUMAPagesTopology", "nova_object.data": {"used": 0, "total": 2043848, "reserved": 0, "size_kb": 4}, "nova_object.namespace": "nova"}, {"nova_object.version": "1.1", "nova_object.changes": ["total", "used", "reserved", "size_kb"], "nova_object.name": "NUMAPagesTopology", "nova_object.data": {"used": 0, "total": 0, "reserved": 0, "size_kb": 2048}, "nova_object.namespace": "nova"}], "id": 0}, "nova_object.namespace": "nova"}]}, "nova_object.namespace": "nova"}
host: compute
ram_allocation_ratio: 0
cpu_allocation_ratio: 0
uuid: 2e37ecce-5d56-4cf1-ab98-0a697b9373d2
disk_allocation_ratio: 0