The following is a sample nova configuration for adaptation and use. For a detailed overview of all available configuration options, refer to Configuration Options.
The sample configuration can also be viewed in file form.
Important
The sample configuration file is auto-generated from nova when this documentation is built. You must ensure your version of nova matches the version of this documentation.
[DEFAULT]
#
# From nova.conf
#
#
# Availability zone for internal services.
#
# This option determines the availability zone for the various internal nova
# services, such as 'nova-scheduler', 'nova-conductor', etc.
#
# Possible values:
#
# * Any string representing an existing availability zone name.
# (string value)
#internal_service_availability_zone = internal
#
# Default availability zone for compute services.
#
# This option determines the default availability zone for 'nova-compute'
# services, which will be used if the service(s) do not belong to aggregates
# with availability zone metadata.
#
# Possible values:
#
# * Any string representing an existing availability zone name.
# (string value)
#default_availability_zone = nova
#
# Default availability zone for instances.
#
# This option determines the default availability zone for instances, which will
# be used when a user does not specify one when creating an instance. The
# instance(s) will be bound to this availability zone for their lifetime.
#
# Possible values:
#
# * Any string representing an existing availability zone name.
# * None, which means that the instance can move from one availability zone to
# another during its lifetime if it is moved from one compute node to another.
# (string value)
#default_schedule_zone = <None>
# Length of generated instance admin passwords. (integer value)
# Minimum value: 0
#password_length = 12
#
# Time period to generate instance usages for. It is possible to define an
# optional offset for the given period by appending the @ character followed
# by a number defining the offset.
#
# Possible values:
#
# * period, example: ``hour``, ``day``, ``month`` or ``year``
# * period with offset, example: ``month@15`` will result in monthly audits
# starting on the 15th day of the month.
# (string value)
#instance_usage_audit_period = month
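#
# For illustration only, a deployment that wants monthly audits starting on
# the 15th day of each month could uncomment and set:
#
#     instance_usage_audit_period = month@15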
#
# Start and use a daemon that can run the commands that need to be run with
# root privileges. This option is usually enabled on nodes that run nova compute
# processes.
# (boolean value)
#use_rootwrap_daemon = false
#
# Path to the rootwrap configuration file.
#
# The goal of the root wrapper is to allow a service-specific unprivileged user
# to run a number of actions as the root user in the safest manner possible.
# The configuration file used here must match the one defined in the sudoers
# entry.
# (string value)
#rootwrap_config = /etc/nova/rootwrap.conf
# Explicitly specify the temporary working directory. (string value)
#tempdir = <None>
# DEPRECATED:
# Determine if monkey patching should be applied.
#
# Related options:
#
# * ``monkey_patch_modules``: This must have values set for this option to
# have any effect
# (boolean value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason:
# Monkey patching nova is not tested, not supported, and is a barrier
# for interoperability.
#monkey_patch = false
# DEPRECATED:
# List of modules/decorators to monkey patch.
#
# This option allows you to patch a decorator for all functions in specified
# modules.
#
# Possible values:
#
# * nova.compute.api:nova.notifications.notify_decorator
# * [...]
#
# Related options:
#
# * ``monkey_patch``: This must be set to ``True`` for this option to
# have any effect
# (list value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason:
# Monkey patching nova is not tested, not supported, and is a barrier
# for interoperability.
#monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator
#
# Defines which driver to use for controlling virtualization.
#
# Possible values:
#
# * ``libvirt.LibvirtDriver``
# * ``xenapi.XenAPIDriver``
# * ``fake.FakeDriver``
# * ``ironic.IronicDriver``
# * ``vmwareapi.VMwareVCDriver``
# * ``hyperv.HyperVDriver``
# * ``powervm.PowerVMDriver``
# (string value)
#compute_driver = <None>
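#
# For illustration only, a KVM-based compute node would uncomment and set one
# of the drivers listed above, for example:
#
#     compute_driver = libvirt.LibvirtDriver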
#
# Allow destination machine to match source for resize. Useful when
# testing in single-host environments. By default it is not allowed
# to resize to the same host. Setting this option to true will add
# the same host to the destination options. Also set to true
# if you allow the ServerGroupAffinityFilter and need to resize.
# (boolean value)
#allow_resize_to_same_host = false
#
# Image properties that should not be inherited from the instance
# when taking a snapshot.
#
# This option gives an opportunity to select which image-properties
# should not be inherited by newly created snapshots.
#
# Possible values:
#
# * A comma-separated list whose items are image properties. Usually only
# the image properties that are needed solely by base images should be included
# here, since the snapshots that are created from the base images don't
# need them.
# * Default list: cache_in_nova, bittorrent, img_signature_hash_method,
# img_signature, img_signature_key_type,
# img_signature_certificate_uuid
#
# (list value)
#non_inheritable_image_properties = cache_in_nova,bittorrent,img_signature_hash_method,img_signature,img_signature_key_type,img_signature_certificate_uuid
# DEPRECATED:
# When creating multiple instances with a single request using the
# os-multiple-create API extension, this template will be used to build
# the display name for each instance. The benefit is that the instances
# end up with different hostnames. Example display names when creating
# two VMs: name-1, name-2.
#
# Possible values:
#
# * Valid keys for the template are: name, uuid, count.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This config changes API behaviour. All changes in API behaviour should be
# discoverable.
#multi_instance_display_name_template = %(name)s-%(count)d
#
# Maximum number of devices that will result in a local image being
# created on the hypervisor node.
#
# A negative number means unlimited. Setting max_local_block_devices
# to 0 means that any request that attempts to create a local disk
# will fail. This option is meant to limit the number of local disks
# (that is, the root local disk that results from --image being used, and
# any other ephemeral and swap disks). 0 does not mean that images
# will be automatically converted to volumes and instances booted from
# volumes - it just means that all requests that attempt to create a
# local disk will fail.
#
# Possible values:
#
# * 0: Creating a local disk is not allowed.
# * Negative number: Allows an unlimited number of local disks.
# * Positive number: Allows only that many local disks.
# (Default value is 3).
# (integer value)
#max_local_block_devices = 3
#
# A comma-separated list of monitors that can be used for getting
# compute metrics. You can use the alias/name from the setuptools
# entry points for nova.compute.monitors.* namespaces. If no
# namespace is supplied, the "cpu." namespace is assumed for
# backwards-compatibility.
#
# NOTE: Only one monitor per namespace (For example: cpu) can be loaded at
# a time.
#
# Possible values:
#
# * An empty list will disable the feature (Default).
# * An example value that would enable both the CPU and NUMA memory
# bandwidth monitors that use the virt driver variant:
#
# compute_monitors = cpu.virt_driver, numa_mem_bw.virt_driver
# (list value)
#compute_monitors =
#
# The default format an ephemeral_volume will be formatted with on creation.
#
# Possible values:
#
# * ``ext2``
# * ``ext3``
# * ``ext4``
# * ``xfs``
# * ``ntfs`` (only for Windows guests)
# (string value)
#default_ephemeral_format = <None>
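#
# For illustration only, a deployment that wants ephemeral disks formatted as
# ext4 by default could uncomment and set:
#
#     default_ephemeral_format = ext4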
#
# Determine if instance should boot or fail on VIF plugging timeout.
#
# Nova sends a port update to Neutron after an instance has been scheduled,
# providing Neutron with the necessary information to finish setup of the port.
# Once completed, Neutron notifies Nova that it has finished setting up the
# port, at which point Nova resumes the boot of the instance since network
# connectivity is now supposed to be present. A timeout will occur if the reply
# is not received after a given interval.
#
# This option determines what Nova does when the VIF plugging timeout event
# happens. When enabled, the instance will error out. When disabled, the
# instance will continue to boot on the assumption that the port is ready.
#
# Possible values:
#
# * True: Instances should fail after VIF plugging timeout
# * False: Instances should continue booting after VIF plugging timeout
# (boolean value)
#vif_plugging_is_fatal = true
#
# Timeout for Neutron VIF plugging event message arrival.
#
# Number of seconds to wait for Neutron vif plugging events to
# arrive before continuing or failing (see 'vif_plugging_is_fatal').
#
# Related options:
#
# * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and
# ``vif_plugging_is_fatal`` is False, events should not be expected to
# arrive at all.
# (integer value)
# Minimum value: 0
#vif_plugging_timeout = 300
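#
# For illustration only, a deployment that does not rely on Neutron VIF
# plugging events at all could combine the two related options as follows
# (per the note above, no events are expected to arrive in this case):
#
#     vif_plugging_is_fatal = false
#     vif_plugging_timeout = 0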
# Path to '/etc/network/interfaces' template.
#
# The path to a template file for the '/etc/network/interfaces'-style file,
# which will be populated by nova and subsequently used by cloudinit. This
# provides a method to configure network connectivity in environments without
# a DHCP server.
#
# The template will be rendered using Jinja2 template engine, and receive a
# top-level key called ``interfaces``. This key will contain a list of
# dictionaries, one for each interface.
#
# Refer to the cloudinit documentation for more information:
#
# https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
#
# Possible values:
#
# * A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces'
# file. This applies even if using a non Debian-derived guest.
#
# Related options:
#
# * ``flat_inject``: This must be set to ``True`` to ensure nova embeds network
# configuration information in the metadata provided through the config drive.
# (string value)
#injected_network_template = $pybasedir/nova/virt/interfaces.template
#
# The image preallocation mode to use.
#
# Image preallocation allows storage for instance images to be allocated up
# front when the instance is initially provisioned. This ensures immediate
# feedback is given if enough space isn't available. In addition, it should
# significantly improve performance on writes to new blocks and may even
# improve I/O performance to prewritten blocks due to reduced fragmentation.
#
# Possible values:
#
# * "none" => no storage provisioning is done up front
# * "space" => storage is fully allocated at instance start
# (string value)
#preallocate_images = none
#
# Enable use of copy-on-write (cow) images.
#
# QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
# backing files will not be used.
# (boolean value)
#use_cow_images = true
#
# Force conversion of backing images to raw format.
#
# Possible values:
#
# * True: Backing image files will be converted to raw image format
# * False: Backing image files will not be converted
#
# Related options:
#
# * ``compute_driver``: Only the libvirt driver uses this option.
# (boolean value)
#force_raw_images = true
#
# Name of the mkfs commands for ephemeral device.
#
# The format is <os_type>=<mkfs command>
# (multi valued)
#virt_mkfs =
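#
# For illustration only, following the <os_type>=<mkfs command> format
# described above. The exact mkfs commands and substitution variables below
# are assumptions for your deployment, not shipped defaults:
#
#     virt_mkfs = linux=mkfs.ext4 -L %(fs_label)s -F %(target)s
#     virt_mkfs = windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s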
#
# Enable resizing of filesystems via a block device.
#
# If enabled, attempt to resize the filesystem by accessing the image over a
# block device. This is done by the host and may not be necessary if the image
# contains a recent version of cloud-init. Possible mechanisms require the nbd
# driver (for qcow and raw), or loop (for raw).
# (boolean value)
#resize_fs_using_block_device = false
# Amount of time, in seconds, to wait for NBD device start up. (integer value)
# Minimum value: 0
#timeout_nbd = 10
#
# Location of cached images.
#
# This is NOT the full path - just a folder name relative to '$instances_path'.
# For per-compute-host cached images, set to '_base_$my_ip'
# (string value)
#image_cache_subdirectory_name = _base
# Should unused base images be removed? (boolean value)
#remove_unused_base_images = true
#
# Unused unresized base images younger than this will not be removed.
# (integer value)
#remove_unused_original_minimum_age_seconds = 86400
#
# Generic property to specify the pointer type.
#
# Input devices allow interaction with a graphical framebuffer. For
# example to provide a graphic tablet for absolute cursor movement.
#
# If set, the 'hw_pointer_model' image property takes precedence over
# this configuration option.
#
# Possible values:
#
# * None: Uses default behavior provided by drivers (mouse on PS2 for
# libvirt x86)
# * ps2mouse: Uses relative movement. Mouse connected by PS2
# * usbtablet: Uses absolute movement. Tablet connected by USB
#
# Related options:
#
# * usbtablet must be configured with VNC enabled or SPICE enabled and SPICE
# agent disabled. When used with libvirt the instance mode should be
# configured as HVM.
# (string value)
#pointer_model = usbtablet
#
# Defines which physical CPUs (pCPUs) can be used by instance
# virtual CPUs (vCPUs).
#
# Possible values:
#
# * A comma-separated list of physical CPU numbers that virtual CPUs can be
# allocated to by default. Each element should be either a single CPU number,
# a range of CPU numbers, or a caret followed by a CPU number to be
# excluded from a previous range. For example:
#
# vcpu_pin_set = "4-12,^8,15"
# (string value)
#vcpu_pin_set = <None>
#
# Number of huge/large memory pages to reserve per NUMA host cell.
#
# Possible values:
#
# * A list of valid key=value pairs which reflect the NUMA node ID, page size
# (default unit is KiB) and the number of pages to be reserved. For example:
#
# reserved_huge_pages = node:0,size:2048,count:64
# reserved_huge_pages = node:1,size:1GB,count:1
#
# In this example we are reserving 64 pages of 2 MiB on NUMA node 0
# and 1 page of 1 GiB on NUMA node 1.
# (dict value)
#reserved_huge_pages = <None>
#
# Amount of disk resources in MB to always keep available on the host. The
# disk usage gets reported back to the scheduler from nova-compute running
# on the compute nodes. To prevent the disk resources from being considered
# as available, this option can be used to reserve disk space for that host.
#
# Possible values:
#
# * Any positive integer representing amount of disk in MB to reserve
# for the host.
# (integer value)
# Minimum value: 0
#reserved_host_disk_mb = 0
#
# Amount of memory in MB to reserve for the host so that it is always available
# to host processes. The host resources usage is reported back to the scheduler
# continuously from nova-compute running on the compute node. To prevent the
# host memory from being considered as available, this option is used to
# reserve memory for the host.
#
# Possible values:
#
# * Any positive integer representing amount of memory in MB to reserve
# for the host.
# (integer value)
# Minimum value: 0
#reserved_host_memory_mb = 512
#
# Number of physical CPUs to reserve for the host. The host resources usage is
# reported back to the scheduler continuously from nova-compute running on the
# compute node. To prevent the host CPU from being considered as available,
# this option is used to reserve random pCPU(s) for the host.
#
# Possible values:
#
# * Any positive integer representing number of physical CPUs to reserve
# for the host.
# (integer value)
# Minimum value: 0
#reserved_host_cpus = 0
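#
# For illustration only, a compute node that wants to keep some resources
# free for host processes might combine the reservation options above. The
# values here are assumptions, not recommendations:
#
#     reserved_host_memory_mb = 4096
#     reserved_host_cpus = 2
#     reserved_host_disk_mb = 10240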
#
# This option helps you specify virtual CPU to physical CPU allocation ratio.
#
# From Ocata (15.0.0) this is used to influence the hosts selected by
# the Placement API. Note that when Placement is used, the CoreFilter
# is redundant, because the Placement API will have already filtered
# out hosts that would have failed the CoreFilter.
#
# This configuration specifies ratio for CoreFilter which can be set
# per compute node. For AggregateCoreFilter, it will fall back to this
# configuration value if no per-aggregate setting is found.
#
# NOTE: This can be set per-compute, or if set to 0.0, the value
# set on the scheduler node(s) or compute node(s) will be used
# and defaulted to 16.0. Once set to a non-default value, it is not possible
# to "unset" the config to get back to the default behavior. If you want
# to reset back to the default, explicitly specify 16.0.
#
# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
#
# Possible values:
#
# * Any valid positive integer or float value
# (floating point value)
# Minimum value: 0
#cpu_allocation_ratio = 0.0
#
# This option helps you specify virtual RAM to physical RAM
# allocation ratio.
#
# From Ocata (15.0.0) this is used to influence the hosts selected by
# the Placement API. Note that when Placement is used, the RamFilter
# is redundant, because the Placement API will have already filtered
# out hosts that would have failed the RamFilter.
#
# This configuration specifies ratio for RamFilter which can be set
# per compute node. For AggregateRamFilter, it will fall back to this
# configuration value if no per-aggregate setting is found.
#
# NOTE: This can be set per-compute, or if set to 0.0, the value
# set on the scheduler node(s) or compute node(s) will be used and
# defaulted to 1.5. Once set to a non-default value, it is not possible
# to "unset" the config to get back to the default behavior. If you want
# to reset back to the default, explicitly specify 1.5.
#
# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
#
# Possible values:
#
# * Any valid positive integer or float value
# (floating point value)
# Minimum value: 0
#ram_allocation_ratio = 0.0
#
# This option helps you specify virtual disk to physical disk
# allocation ratio.
#
# From Ocata (15.0.0) this is used to influence the hosts selected by
# the Placement API. Note that when Placement is used, the DiskFilter
# is redundant, because the Placement API will have already filtered
# out hosts that would have failed the DiskFilter.
#
# A ratio greater than 1.0 will result in over-subscription of the
# available physical disk, which can be useful for more
# efficiently packing instances created with images that do not
# use the entire virtual disk, such as sparse or compressed
# images. It can be set to a value between 0.0 and 1.0 in order
# to preserve a percentage of the disk for uses other than
# instances.
#
# NOTE: This can be set per-compute, or if set to 0.0, the value
# set on the scheduler node(s) or compute node(s) will be used and
# defaulted to 1.0. Once set to a non-default value, it is not possible
# to "unset" the config to get back to the default behavior. If you want
# to reset back to the default, explicitly specify 1.0.
#
# NOTE: As of the 16.0.0 Pike release, this configuration option is ignored
# for the ironic.IronicDriver compute driver and is hardcoded to 1.0.
#
# Possible values:
#
# * Any valid positive integer or float value
# (floating point value)
# Minimum value: 0
#disk_allocation_ratio = 0.0
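#
# For illustration only, a worked example of the three ratios above: on a host
# with 32 pCPUs, 256 GB of RAM and 2000 GB of disk, the settings below would
# let the scheduler place up to 32 * 4.0 = 128 vCPUs, 256 * 1.5 = 384 GB of
# RAM and 2000 * 1.0 = 2000 GB of disk worth of instances on that host. The
# values are assumptions, not recommendations:
#
#     cpu_allocation_ratio = 4.0
#     ram_allocation_ratio = 1.5
#     disk_allocation_ratio = 1.0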
#
# Console proxy host to be used to connect to instances on this host. It is the
# publicly visible name for the console host.
#
# Possible values:
#
# * Current hostname (default) or any string representing hostname.
# (string value)
#console_host = <current_hostname>
#
# Name of the network to be used to set access IPs for instances. If there are
# multiple IPs to choose from, an arbitrary one will be chosen.
#
# Possible values:
#
# * None (default)
# * Any string representing network name.
# (string value)
#default_access_ip_network_name = <None>
#
# Whether to batch up the application of IPTables rules during a host restart
# and apply all at the end of the init phase.
# (boolean value)
#defer_iptables_apply = false
#
# Specifies where instances are stored on the hypervisor's disk.
# It can point to locally attached storage or a directory on NFS.
#
# Possible values:
#
# * $state_path/instances (default), where state_path is a config option that
# specifies the top-level directory for maintaining nova's state.
# * Any string representing a directory path.
#
# Related options:
#
# * ``[workarounds]/ensure_libvirt_rbd_instance_dir_cleanup``
# (string value)
#instances_path = $state_path/instances
#
# This option enables periodic compute.instance.exists notifications. Each
# compute node must be configured to generate system usage data. These
# notifications are consumed by the OpenStack Telemetry service.
# (boolean value)
#instance_usage_audit = false
#
# Maximum number of 1 second retries in live_migration. It specifies the number
# of retries made to iptables when it complains. This happens when a user
# continuously sends live-migration requests to the same host, leading to
# concurrent requests to iptables.
#
# Possible values:
#
# * Any positive integer representing retry count.
# (integer value)
# Minimum value: 0
#live_migration_retry_count = 30
#
# This option specifies whether to start guests that were running before the
# host rebooted. It ensures that all of the instances on a Nova compute node
# resume their state each time the compute node boots or restarts.
# (boolean value)
#resume_guests_state_on_host_boot = false
#
# Number of times to retry network allocation. Retries are attempted if the
# virtual interface plug fails.
#
# Possible values:
#
# * Any positive integer representing retry count.
# (integer value)
# Minimum value: 0
#network_allocate_retries = 0
#
# Limits the maximum number of instance builds to run concurrently by
# nova-compute. The compute service can attempt to build an infinite number of
# instances, if asked to do so. This limit is enforced to avoid building an
# unlimited number of instances concurrently on a compute node. This value can
# be set per compute node.
#
# Possible Values:
#
# * 0 : treated as unlimited.
# * Any positive integer representing maximum concurrent builds.
# (integer value)
# Minimum value: 0
#max_concurrent_builds = 10
#
# Maximum number of live migrations to run concurrently. This limit is enforced
# to avoid outbound live migrations overwhelming the host/network and causing
# failures. It is not recommended that you change this unless you are very sure
# that doing so is safe and stable in your environment.
#
# Possible values:
#
# * 0 : treated as unlimited.
# * Negative value defaults to 0.
# * Any positive integer representing maximum number of live migrations
# to run concurrently.
# (integer value)
#max_concurrent_live_migrations = 1
#
# Number of times to retry block device allocation on failures. Starting with
# Liberty, Cinder can use image volume cache. This may help with block device
# allocation performance. Look at the cinder image_volume_cache_enabled
# configuration option.
#
# Possible values:
#
# * 60 (default)
# * If value is 0, then one attempt is made.
# * Any negative value is treated as 0.
# * For any value > 0, total attempts are (value + 1)
# (integer value)
#block_device_allocate_retries = 60
#
# Number of greenthreads available for use to sync power states.
#
# This option can be used to reduce the number of concurrent requests
# made to the hypervisor or system with real instance power states
# for performance reasons, for example, with Ironic.
#
# Possible values:
#
# * Any positive integer representing greenthreads count.
# (integer value)
#sync_power_state_pool_size = 1000
#
# Number of seconds to wait between runs of the image cache manager.
#
# Possible values:
# * 0: run at the default rate.
# * -1: disable.
# * Any other positive integer: run at that interval, in seconds.
# (integer value)
# Minimum value: -1
#image_cache_manager_interval = 2400
#
# Interval to pull network bandwidth usage info.
#
# Not supported on all hypervisors. If a hypervisor doesn't support bandwidth
# usage, it will not get the info in the usage events.
#
# Possible values:
#
# * 0: Will run at the default periodic interval.
# * Any value < 0: Disables the option.
# * Any positive integer in seconds.
# (integer value)
#bandwidth_poll_interval = 600
#
# Interval to sync power states between the database and the hypervisor.
#
# The interval that Nova checks the actual virtual machine power state
# and the power state that Nova has in its database. If a user powers
# down their VM, Nova updates the API to report the VM has been
# powered down. Should something turn on the VM unexpectedly,
# Nova will turn the VM back off to keep the system in the expected
# state.
#
# Possible values:
#
# * 0: Will run at the default periodic interval.
# * Any value < 0: Disables the option.
# * Any positive integer in seconds.
#
# Related options:
#
# * If ``handle_virt_lifecycle_events`` in workarounds_group is
# false and this option is negative, then instances that get out
# of sync between the hypervisor and the Nova database will have
# to be synchronized manually.
# (integer value)
#sync_power_state_interval = 600
#
# Interval between instance network information cache updates.
#
# Number of seconds after which each compute node runs the task of
# querying Neutron for all of its instances networking information,
# then updates the Nova db with that information. Nova will never
# update its cache if this option is set to 0. If we don't update the
# cache, the metadata service and nova-api endpoints will be proxying
# incorrect network data about the instance. So, it is not recommended
# to set this option to 0.
#
# Possible values:
#
# * Any positive integer in seconds.
# * Any value <=0 will disable the sync. This is not recommended.
# (integer value)
#heal_instance_info_cache_interval = 60
#
# Interval for reclaiming deleted instances.
#
# A value greater than 0 will enable SOFT_DELETE of instances.
# This option decides whether the server to be deleted will be put into
# the SOFT_DELETED state. If this value is greater than 0, the deleted
# server will not be deleted immediately, instead it will be put into
# a queue until it's too old (deleted time greater than the value of
# reclaim_instance_interval). The server can be recovered from the
# delete queue by using the restore action. If the deleted server remains
# longer than the value of reclaim_instance_interval, it will be
# deleted by a periodic task in the compute service automatically.
#
# Note that this option is read from both the API and compute nodes, and
# must be set globally otherwise servers could be put into a soft deleted
# state in the API and never actually reclaimed (deleted) on the compute
# node.
#
# Possible values:
#
# * Any positive integer (in seconds) greater than 0 will enable
# this option.
# * Any value <=0 will disable the option.
# (integer value)
#reclaim_instance_interval = 0
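#
# For illustration only, a deployment that wants deleted servers to remain
# recoverable via the restore action for one hour could uncomment and set the
# following (on both API and compute nodes, per the note above):
#
#     reclaim_instance_interval = 3600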
#
# Interval for gathering volume usages.
#
# This option updates the volume usage cache for every
# volume_usage_poll_interval number of seconds.
#
# Possible values:
#
# * Any positive integer (in seconds) greater than 0 will enable
# this option.
# * Any value <=0 will disable the option.
# (integer value)
#volume_usage_poll_interval = 0
#
# Interval for polling shelved instances to offload.
#
# The periodic task runs for every shelved_poll_interval number
# of seconds and checks if there are any shelved instances. If it
# finds a shelved instance, based on the 'shelved_offload_time' config
# value it offloads the shelved instances. Check 'shelved_offload_time'
# config option description for details.
#
# Possible values:
#
# * Any value <= 0: Disables the option.
# * Any positive integer in seconds.
#
# Related options:
#
# * ``shelved_offload_time``
# (integer value)
#shelved_poll_interval = 3600
#
# Time before a shelved instance is eligible for removal from a host.
#
# By default this option is set to 0 and the shelved instance will be
# removed from the hypervisor immediately after shelve operation.
# Otherwise, the instance will be kept for the value of
# shelved_offload_time (in seconds) so that during that time period the
# unshelve action will be faster; the periodic task will then remove
# the instance from the hypervisor after shelved_offload_time passes.
#
# Possible values:
#
# * 0: Instance will be immediately offloaded after being
# shelved.
# * Any value < 0: An instance will never be offloaded.
# * Any positive integer in seconds: The instance will exist for
# the specified number of seconds before being offloaded.
# (integer value)
#shelved_offload_time = 0
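#
# For illustration only, a deployment that wants shelved instances to stay on
# the hypervisor for one day before being offloaded, with the periodic task
# checking once an hour, could combine the two related options. The values
# are assumptions, not recommendations:
#
#     shelved_poll_interval = 3600
#     shelved_offload_time = 86400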
#
# Interval for retrying failed instance file deletes.
#
# This option depends on 'maximum_instance_delete_attempts'.
# This option specifies how often to retry deletes whereas
# 'maximum_instance_delete_attempts' specifies the maximum number
# of retry attempts that can be made.
#
# Possible values:
#
# * 0: Will run at the default periodic interval.
# * Any value < 0: Disables the option.
# * Any positive integer in seconds.
#
# Related options:
#
# * ``maximum_instance_delete_attempts`` from instance_cleaning_opts
# group.
# (integer value)
#instance_delete_interval = 300
#
# Interval (in seconds) between block device allocation retries on failures.
#
# This option allows the user to specify the time interval between
# consecutive retries. 'block_device_allocate_retries' option specifies
# the maximum number of retries.
#
# Possible values:
#
# * 0: Disables the option.
# * Any positive integer in seconds enables the option.
#
# Related options:
#
# * ``block_device_allocate_retries`` in compute_manager_opts group.
# (integer value)
# Minimum value: 0
#block_device_allocate_retries_interval = 3
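#
# For illustration only, with the defaults above nova-compute makes up to
# 60 + 1 = 61 allocation attempts, waiting 3 seconds between them, i.e. it
# waits roughly 3 minutes for a block device before giving up. A deployment
# with a slower Cinder backend might raise the interval instead (an
# assumption, not a recommendation):
#
#     block_device_allocate_retries = 60
#     block_device_allocate_retries_interval = 10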
#
# Interval between sending the scheduler a list of current instance UUIDs to
# verify that its view of instances is in sync with nova.
#
# If the CONF option 'scheduler_tracks_instance_changes' is
# False, the sync calls will not be made. So, changing this option will
# have no effect.
#
# If the out of sync situations are not very common, this interval
# can be increased to lower the number of RPC messages being sent.
# Likewise, if sync issues turn out to be a problem, the interval
# can be lowered to check more frequently.
#
# Possible values:
#
# * 0: Will run at the default periodic interval.
# * Any value < 0: Disables the option.
# * Any positive integer in seconds.
#
# Related options:
#
# * This option has no impact if ``scheduler_tracks_instance_changes``
# is set to False.
# (integer value)
#scheduler_instance_sync_interval = 120
#
# Interval for updating compute resources.
#
# This option specifies how often the update_available_resources
# periodic task should run. A number less than 0 means to disable the
# task completely. Leaving this at the default of 0 will cause this to
# run at the default periodic interval. Setting it to any positive
# value will cause it to run at approximately that number of seconds.
#
# Possible values:
#
# * 0: Will run at the default periodic interval.
# * Any value < 0: Disables the option.
# * Any positive integer in seconds.
# (integer value)
#update_resources_interval = 0
#
# Time interval after which an instance is hard rebooted automatically.
#
# When doing a soft reboot, it is possible that a guest kernel is
# completely hung in a way that causes the soft reboot task
# to not ever finish. Setting this option to a time period in seconds
# will automatically hard reboot an instance if it has been stuck
# in a rebooting state longer than N seconds.
#
# Possible values:
#
# * 0: Disables the option (default).
# * Any positive integer in seconds: Enables the option.
# (integer value)
# Minimum value: 0
#reboot_timeout = 0
#
# Maximum time in seconds that an instance can take to build.
#
# If this timer expires, instance status will be changed to ERROR.
# Enabling this option will make sure an instance will not be stuck
# in the BUILD state for an extended period.
#
# Possible values:
#
# * 0: Disables the option (default)
# * Any positive integer in seconds: Enables the option.
# (integer value)
# Minimum value: 0
#instance_build_timeout = 0
#
# Interval to wait before un-rescuing an instance stuck in RESCUE.
#
# Possible values:
#
# * 0: Disables the option (default)
# * Any positive integer in seconds: Enables the option.
# (integer value)
# Minimum value: 0
#rescue_timeout = 0
#
# Automatically confirm resizes after N seconds.
#
# Resize functionality will save the existing server before resizing.
# After the resize completes, the user is requested to confirm the resize.
# The user has the opportunity to either confirm or revert all
# changes. Confirm resize removes the original server and changes
# server status from resized to active. Setting this option to a time
# period (in seconds) will automatically confirm the resize if the
# server is in resized state longer than that time.
#
# Possible values:
#
# * 0: Disables the option (default)
# * Any positive integer in seconds: Enables the option.
# (integer value)
# Minimum value: 0
#resize_confirm_window = 0
#
# Total time to wait in seconds for an instance to perform a clean
# shutdown.
#
# It determines the overall period (in seconds) a VM is allowed to
# perform a clean shutdown. While performing stop, rescue, shelve and
# rebuild operations, configuring this option gives the VM a chance
# to perform a controlled shutdown before the instance is powered off.
# The default timeout is 60 seconds.
#
# The timeout value can be overridden on a per image basis by means
# of os_shutdown_timeout, an image metadata setting which allows
# different types of operating systems to specify how much time they
# need to shut down cleanly.
#
# Possible values:
#
# * Any positive integer in seconds (default value is 60).
# (integer value)
# Minimum value: 1
#shutdown_timeout = 60
#
# The compute service periodically checks for instances that have been
# deleted in the database but remain running on the compute node. This
# option determines the action to be taken when such instances are
# identified.
#
# Possible values:
#
# * reap: Powers down the instances and deletes them (default)
# * log: Logs a warning message about deletion of the resource
# * shutdown: Powers down instances and marks them as non-bootable,
# which can later be used for debugging/analysis
# * noop: Takes no action
#
# Related options:
#
# * running_deleted_instance_poll_interval
# * running_deleted_instance_timeout
# (string value)
#running_deleted_instance_action = reap
#
# Time interval in seconds to wait between runs of the clean up action.
# If set to 0, the above check will be disabled. If "running_deleted_instance_action"
# is set to "log" or "reap", a value greater than 0 must be set.
#
# Possible values:
#
# * Any positive integer in seconds enables the option.
# * 0: Disables the option.
# * 1800: Default value.
#
# Related options:
#
# * running_deleted_instance_action
# (integer value)
#running_deleted_instance_poll_interval = 1800
#
# Time interval in seconds to wait for the instances that have
# been marked as deleted in database to be eligible for cleanup.
#
# Possible values:
#
# * Any positive integer in seconds (default is 0).
#
# Related options:
#
# * "running_deleted_instance_action"
# (integer value)
#running_deleted_instance_timeout = 0
#
# The number of times to attempt to reap an instance's files.
#
# This option specifies the maximum number of retry attempts
# that can be made.
#
# Possible values:
#
# * Any positive integer defines how many attempts are made.
# * Any value <=0 means no delete attempts occur, but you should use
# ``instance_delete_interval`` to disable the delete attempts.
#
# Related options:
# * ``instance_delete_interval`` in interval_opts group can be used to disable
# this option.
# (integer value)
#maximum_instance_delete_attempts = 5
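#
# For illustration only, with the default values the cleanup task retries a
# failed instance file delete every 300 seconds ('instance_delete_interval')
# and gives up after 5 attempts ('maximum_instance_delete_attempts'):
#
#     instance_delete_interval = 300
#     maximum_instance_delete_attempts = 5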
#
# Sets the scope of the check for unique instance names.
#
# The default doesn't check for unique names. If a scope for the name check is
# set, a launch of a new instance or an update of an existing instance with a
# duplicate name will result in an ``InstanceExists`` error. The uniqueness is
# case-insensitive. Setting this option can increase the usability for end
# users as they don't have to distinguish among instances with the same name
# by their IDs.
#
# Possible values:
#
# * '': An empty value means that no uniqueness check is done and duplicate
# names are possible.
# * "project": The instance name check is done only for instances within the
# same project.
# * "global": The instance name check is done for all instances regardless of
# the project.
# (string value)
#osapi_compute_unique_server_name_scope =
#
# Enable new nova-compute services on this host automatically.
#
# When a new nova-compute service starts up, it gets
# registered in the database as an enabled service. Sometimes it can be useful
# to register new compute services in a disabled state and then enable them at
# a later point in time. This option only sets this behavior for nova-compute
# services; it does not auto-disable other services like nova-conductor,
# nova-scheduler, nova-consoleauth, or nova-osapi_compute.
#
# Possible values:
#
# * ``True``: Each new compute service is enabled as soon as it registers
# itself.
# * ``False``: Compute services must be enabled via an os-services REST API call
# or with the CLI with ``nova service-enable <hostname> <binary>``, otherwise
# they are not ready to use.
# (boolean value)
#enable_new_services = true
#
# Template string to be used to generate instance names.
#
# This template controls the creation of the database name of an instance. This
# is *not* the display name you enter when creating an instance (via Horizon
# or CLI). For a new deployment it is advisable to change the default value
# (which uses the database autoincrement) to another value which makes use
# of the attributes of an instance, like ``instance-%(uuid)s``. If you
# already have instances in your deployment when you change this, your
# deployment will break.
#
# Possible values:
#
# * A string which either uses the instance database ID (like the
# default)
# * A string with a list of named database columns, for example ``%(id)d``
# or ``%(uuid)s`` or ``%(hostname)s``.
#
# Related options:
#
# * not to be confused with: ``multi_instance_display_name_template``
# (string value)
#instance_name_template = instance-%08x
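#
# For illustration only, a new deployment that wants instance names based on
# the UUID rather than the database autoincrement, as suggested above, could
# uncomment and set:
#
#     instance_name_template = instance-%(uuid)s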
#
# Number of times to retry live-migration before failing.
#
# Possible values:
#
# * If == -1, try until out of hosts (default)
# * If == 0, only try once, no retries
# * Integer greater than 0
# (integer value)
# Minimum value: -1
#migrate_max_retries = -1
#
# Configuration drive format
#
# Configuration drive format that will contain metadata attached to the
# instance when it boots.
#
# Possible values:
#
# * iso9660: A file system image standard that is widely supported across
# operating systems. NOTE: Mind the libvirt bug
# (https://bugs.launchpad.net/nova/+bug/1246201) - If your hypervisor
# driver is libvirt, and you want live migration to work without shared storage,
# then use VFAT.
# * vfat: For legacy reasons, you can configure the configuration drive to
# use VFAT format instead of ISO 9660.
#
# Related options:
#
# * This option is meaningful when one of the following alternatives occurs:
# 1. force_config_drive option set to 'true'
# 2. the REST API call to create the instance contains an enable flag for
# config drive option
# 3. the image used to create the instance requires a config drive,
# which is defined by the img_config_drive property of that image.
# * A compute node running Hyper-V hypervisor can be configured to attach
# configuration drive as a CD drive. To attach the configuration drive as a CD
# drive, set config_drive_cdrom option at hyperv section, to true.
# (string value)
#config_drive_format = iso9660
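#
# For illustration only, a libvirt-based deployment that needs live migration
# to work without shared storage (see the note above) could uncomment and set:
#
#     config_drive_format = vfat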
#
# Force injection to take place on a config drive
#
# When this option is set to true, configuration drive functionality will be
# forced enabled by default; otherwise the user can still enable configuration
# drives via the REST API or image metadata properties.
#
# Possible values:
#
# * True: Force the use of a configuration drive regardless of the user's input
# in the REST API call.
# * False: Do not force use of configuration drive. Config drives can still be
# enabled via the REST API or image metadata properties.
#
# Related options:
#
# * Use the 'mkisofs_cmd' flag to set the path where you install the
# genisoimage program. If genisoimage is in the same path as the
# nova-compute service, you do not need to set this flag.
# * To use configuration drive with Hyper-V, you must set the
# 'mkisofs_cmd' value to the full path to an mkisofs.exe installation.
# Additionally, you must set the qemu_img_cmd value in the hyperv
# configuration section to the full path to a qemu-img command
# installation.
# (boolean value)
#force_config_drive = false
#
# Name or path of the tool used for ISO image creation
#
# Use the mkisofs_cmd flag to set the path where you install the genisoimage
# program. If genisoimage is on the system path, you do not need to change
# the default value.
#
# To use configuration drive with Hyper-V, you must set the mkisofs_cmd value
# to the full path to an mkisofs.exe installation. Additionally, you must set
# the qemu_img_cmd value in the hyperv configuration section to the full path
# to a qemu-img command installation.
#
# Possible values:
#
# * Name of the ISO image creator program, in case it is in the same directory
# as the nova-compute service
# * Path to ISO image creator program
#
# Related options:
#
# * This option is meaningful when config drives are enabled.
# * To use configuration drive with Hyper-V, you must set the qemu_img_cmd
# value in the hyperv configuration section to the full path to a qemu-img
# command installation.
# (string value)
#mkisofs_cmd = genisoimage
# DEPRECATED: The driver to use for database access (string value)
# This option is deprecated for removal since 13.0.0.
# Its value may be silently ignored in the future.
#db_driver = nova.db
# DEPRECATED:
# Default flavor to use for the EC2 API only.
# The Nova API does not support a default flavor.
# (string value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason: The EC2 API is deprecated.
#default_flavor = m1.small
#
# The IP address which the host is using to connect to the management network.
#
# Possible values:
#
# * String with valid IP address. Default is IPv4 address of this host.
#
# Related options:
#
# * metadata_host
# * my_block_storage_ip
# * routing_source_ip
# * vpn_ip
# (string value)
#my_ip = <host_ipv4>
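#
# For illustration only, using an address from the documentation range
# (192.0.2.0/24) as a stand-in for this host's management address:
#
#     my_ip = 192.0.2.10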
#
# The IP address which is used to connect to the block storage network.
#
# Possible values:
#
# * String with valid IP address. Default is IP address of this host.
#
# Related options:
#
# * my_ip - if my_block_storage_ip is not set, then my_ip value is used.
# (string value)
#my_block_storage_ip = $my_ip
#
# Hostname, FQDN or IP address of this host.
#
# Used as:
#
# * the oslo.messaging queue name for nova-compute worker
# * we use this value for the binding_host sent to neutron. This means that if
# you use a neutron agent, it should have the same value for host.
# * cinder host attachment information
#
# Must be valid within AMQP key.
#
# Possible values:
#
# * String with hostname, FQDN or IP address. Default is hostname of this host.
# (string value)
#host = <current_hostname>
# DEPRECATED:
# This option is a list of full paths to one or more configuration files for
# dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf'
# should be sufficient, but if you have special needs for configuring
# dhcpbridge, you can change or add to this list.
#
# Possible values
#
# * A list of strings, where each string is the full path to a dhcpbridge
# configuration file.
# (multi valued)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dhcpbridge_flagfile = /etc/nova/nova-dhcpbridge.conf
# DEPRECATED:
# The location where the network configuration files will be kept. The default
# is the 'networks' directory off of the location where nova's Python module
# is installed.
#
# Possible values
#
# * A string containing the full path to the desired configuration directory
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#networks_path = $state_path/networks
# DEPRECATED:
# This is the name of the network interface for public IP addresses. The default
# is 'eth0'.
#
# Possible values:
#
# * Any string representing a network interface name
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#public_interface = eth0
# DEPRECATED:
# The location of the binary nova-dhcpbridge. By default it is the binary named
# 'nova-dhcpbridge' that is installed with all the other nova binaries.
#
# Possible values:
#
# * Any string representing the full path to the binary for dhcpbridge
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dhcpbridge = $bindir/nova-dhcpbridge
# DEPRECATED:
# The public IP address of the network host.
#
# This is used when creating an SNAT rule.
#
# Possible values:
#
# * Any valid IP address
#
# Related options:
#
# * ``force_snat_range``
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#routing_source_ip = $my_ip
# DEPRECATED:
# The lifetime of a DHCP lease, in seconds. The default is 86400 (one day).
#
# Possible values:
#
# * Any positive integer value.
# (integer value)
# Minimum value: 1
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dhcp_lease_time = 86400
# DEPRECATED:
# Despite the singular form of the name of this option, it is actually a list of
# zero or more server addresses that dnsmasq will use for DNS nameservers. If
# this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use
# the servers specified in this option. If the option use_network_dns_servers is
# True, the dns1 and dns2 servers from the network will be appended to this
# list, and will be used as DNS servers, too.
#
# Possible values:
#
# * A list of strings, where each string is either an IP address or a FQDN.
#
# Related options:
#
# * ``use_network_dns_servers``
# (multi valued)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dns_server =
# DEPRECATED:
# When this option is set to True, the dns1 and dns2 servers for the network
# specified by the user on boot will be used for DNS, as well as any specified
# in the `dns_server` option.
#
# Related options:
#
# * ``dns_server``
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#use_network_dns_servers = false
# DEPRECATED:
# This option is a list of zero or more IP address ranges in your network's DMZ
# that should be accepted.
#
# Possible values:
#
# * A list of strings, each of which should be a valid CIDR.
# (list value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dmz_cidr =
# DEPRECATED:
# This is a list of zero or more IP ranges that traffic from the
# `routing_source_ip` will be SNATted to. If the list is empty, then no SNAT
# rules are created.
#
# Possible values:
#
# * A list of strings, each of which should be a valid CIDR.
#
# Related options:
#
# * ``routing_source_ip``
# (multi valued)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#force_snat_range =
# DEPRECATED:
# The path to the custom dnsmasq configuration file, if any.
#
# Possible values:
#
# * The full path to the configuration file, or an empty string if there is no
# custom dnsmasq configuration file.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dnsmasq_config_file =
# DEPRECATED:
# This is the class used as the ethernet device driver for linuxnet bridge
# operations. The default value should be all you need for most cases, but if
# you wish to use a customized class, set this option to the full dot-separated
# import path for that class.
#
# Possible values:
#
# * Any string representing a dot-separated class path that Nova can import.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#linuxnet_interface_driver = nova.network.linux_net.LinuxBridgeInterfaceDriver
# DEPRECATED:
# The name of the Open vSwitch bridge that is used with linuxnet when connecting
# with Open vSwitch.
#
# Possible values:
#
# * Any string representing a valid bridge name.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#linuxnet_ovs_integration_bridge = br-int
#
# When True, when a device starts up, and upon binding floating IP addresses,
# arp messages will be sent to ensure that the arp caches on the compute hosts
# are up-to-date.
#
# Related options:
#
# * ``send_arp_for_ha_count``
# (boolean value)
#send_arp_for_ha = false
#
# When arp messages are configured to be sent, they will be sent with the count
# set to the value of this option. Of course, if this is set to zero, no arp
# messages will be sent.
#
# Possible values:
#
# * Any integer greater than or equal to 0
#
# Related options:
#
# * ``send_arp_for_ha``
# (integer value)
#send_arp_for_ha_count = 3
# DEPRECATED:
# When set to True, only the first NIC of a VM will get its default gateway from
# the DHCP server.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#use_single_default_gateway = false
# DEPRECATED:
# One or more interfaces that bridges can forward traffic to. If any of the
# items in this list is the special keyword 'all', then all traffic will be
# forwarded.
#
# Possible values:
#
# * A list of zero or more interface names, or the word 'all'.
# (multi valued)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#forward_bridge_interface = all
#
# This option determines the IP address for the network metadata API server.
#
# This is really the client side of the metadata host equation that allows
# nova-network to find the metadata server when doing default multi-host
# networking.
#
# Possible values:
#
# * Any valid IP address. The default is the address of the Nova API server.
#
# Related options:
#
# * ``metadata_port``
# (string value)
#metadata_host = $my_ip
# DEPRECATED:
# This option determines the port used for the metadata API server.
#
# Related options:
#
# * ``metadata_host``
# (port value)
# Minimum value: 0
# Maximum value: 65535
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#metadata_port = 8775
# DEPRECATED:
# This expression, if defined, will select any matching iptables rules and place
# them at the top when applying metadata changes to the rules.
#
# Possible values:
#
# * Any string representing a valid regular expression, or an empty string
#
# Related options:
#
# * ``iptables_bottom_regex``
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#iptables_top_regex =
# DEPRECATED:
# This expression, if defined, will select any matching iptables rules and place
# them at the bottom when applying metadata changes to the rules.
#
# Possible values:
#
# * Any string representing a valid regular expression, or an empty string
#
# Related options:
#
# * iptables_top_regex
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#iptables_bottom_regex =
# DEPRECATED:
# By default, packets that do not pass the firewall are DROPped. In many cases,
# though, an operator may find it more useful to change this from DROP to
# REJECT, so that the user issuing those packets may have a better idea as to
# what's going on, or LOGDROP in order to record the blocked traffic before
# DROPping.
#
# Possible values:
#
# * A string representing an iptables chain. The default is DROP.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#iptables_drop_action = DROP
# DEPRECATED:
# This option represents the period of time, in seconds, that the ovs_vsctl
# calls will wait for a response from the database before timing out. A setting
# of 0 means that the utility should wait forever for a response.
#
# Possible values:
#
# * Any positive integer if a limited timeout is desired, or zero if the calls
# should wait forever for a response.
# (integer value)
# Minimum value: 0
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ovs_vsctl_timeout = 120
# DEPRECATED:
# This option is used mainly in testing to avoid calls to the underlying network
# utilities.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#fake_network = false
# DEPRECATED:
# This option determines the number of times to retry ebtables commands before
# giving up. The minimum number of retries is 1.
#
# Possible values:
#
# * Any positive integer
#
# Related options:
#
# * ``ebtables_retry_interval``
# (integer value)
# Minimum value: 1
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ebtables_exec_attempts = 3
# DEPRECATED:
# This option determines the time, in seconds, that the system will sleep in
# between ebtables retries. Note that each successive retry waits a multiple of
# this value, so for example, if this is set to the default of 1.0 seconds, and
# ebtables_exec_attempts is 4, after the first failure, the system will sleep
# for 1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds,
# and after the third failure it will sleep 3 * 1.0 seconds.
#
# Possible values:
#
# * Any non-negative float or integer. Setting this to zero will result in no
# waiting between attempts.
#
# Related options:
#
# * ebtables_exec_attempts
# (floating point value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ebtables_retry_interval = 1.0
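#
# For illustration only (hypothetical values, not defaults or
# recommendations). Given the backoff behavior described above, setting
# ebtables_exec_attempts = 3 and ebtables_retry_interval = 2.0 means a failed
# ebtables command is retried after 1 * 2.0 = 2.0 seconds and again after
# 2 * 2.0 = 4.0 seconds before nova-network gives up:
#
#   ebtables_exec_attempts = 3
#   ebtables_retry_interval = 2.0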
# DEPRECATED:
# Enable neutron as the backend for networking.
#
# Determines whether to use Neutron or nova-network as the back end. Set to
# true to use Neutron.
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#use_neutron = true
#
# This option determines whether the network setup information is injected into
# the VM before it is booted. While it was originally designed to be used only
# by nova-network, it is also used by the vmware and xenapi virt drivers to
# control whether network information is injected into a VM. The libvirt virt
# driver also uses it when config drive is used to configure the network, to
# control whether network information is injected into the VM.
# (boolean value)
#flat_injected = false
# DEPRECATED:
# This option determines the bridge used for simple network interfaces when no
# bridge is specified in the VM creation request.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any string representing a valid network bridge, such as 'br100'
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#flat_network_bridge = <None>
# DEPRECATED:
# This is the address of the DNS server for a simple network. If this option is
# not specified, the default of '8.8.4.4' is used.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any valid IP address.
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#flat_network_dns = 8.8.4.4
# DEPRECATED:
# This option is the name of the virtual interface of the VM on which the bridge
# will be built. While it was originally designed to be used only by
# nova-network, it is also used by libvirt for the bridge interface name.
#
# Possible values:
#
# * Any valid virtual interface name, such as 'eth0'
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#flat_interface = <None>
# DEPRECATED:
# This is the VLAN number used for private networks. Note that when creating
# the networks, if the specified number has already been assigned, nova-network
# will increment this number until it finds an available VLAN.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment. It also will be ignored if the configuration
# option for `network_manager` is not set to the default of
# 'nova.network.manager.VlanManager'.
#
# Possible values:
#
# * Any integer between 1 and 4094. Values outside of that range will raise a
# ValueError exception.
#
# Related options:
#
# * ``network_manager``
# * ``use_neutron``
# (integer value)
# Minimum value: 1
# Maximum value: 4094
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#vlan_start = 100
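#
# For illustration only (hypothetical values, not defaults). A (deprecated)
# nova-network deployment using the VLAN manager that wants private-network
# VLAN allocation to start at 200 could combine this option with
# network_manager (defined later in this file):
#
#   network_manager = nova.network.manager.VlanManager
#   vlan_start = 200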
# DEPRECATED:
# This option is the name of the virtual interface of the VM on which the VLAN
# bridge will be built. While it was originally designed to be used only by
# nova-network, it is also used by libvirt and xenapi for the bridge interface
# name.
#
# Please note that this setting will be ignored in nova-network if the
# configuration option for `network_manager` is not set to the default of
# 'nova.network.manager.VlanManager'.
#
# Possible values:
#
# * Any valid virtual interface name, such as 'eth0'
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options. While
# this option has an effect when using neutron, it incorrectly overrides the
# value provided by neutron and should therefore not be used.
#vlan_interface = <None>
# DEPRECATED:
# This option represents the number of networks to create if not explicitly
# specified when the network is created. The only time this is used is if a CIDR
# is specified, but an explicit network_size is not. In that case, the subnets
# are created by dividing the IP address space of the CIDR by num_networks. The
# resulting subnet sizes cannot be larger than the configuration option
# `network_size`; in that event, they are reduced to `network_size`, and a
# warning is logged.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any positive integer is technically valid, although there are practical
# limits based upon available IP address space and virtual interfaces.
#
# Related options:
#
# * ``use_neutron``
# * ``network_size``
# (integer value)
# Minimum value: 1
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#num_networks = 1
# DEPRECATED:
# This option is no longer used since the /os-cloudpipe API was removed in the
# 16.0.0 Pike release. This is the public IP address for the cloudpipe VPN
# servers. It defaults to the IP address of the host.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment. It also will be ignored if the configuration
# option for `network_manager` is not set to the default of
# 'nova.network.manager.VlanManager'.
#
# Possible values:
#
# * Any valid IP address. The default is ``$my_ip``, the IP address of the host.
#
# Related options:
#
# * ``network_manager``
# * ``use_neutron``
# * ``vpn_start``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#vpn_ip = $my_ip
# DEPRECATED:
# This is the port number to use as the first VPN port for private networks.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment. It also will be ignored if the configuration
# option for `network_manager` is not set to the default of
# 'nova.network.manager.VlanManager', or if you specify a value for the
# 'vpn_start' parameter when creating a network.
#
# Possible values:
#
# * Any integer representing a valid port number. The default is 1000.
#
# Related options:
#
# * ``use_neutron``
# * ``vpn_ip``
# * ``network_manager``
# (port value)
# Minimum value: 0
# Maximum value: 65535
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#vpn_start = 1000
# DEPRECATED:
# This option determines the number of addresses in each private subnet.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any positive integer that is less than or equal to the available network
# size. Note that if you are creating multiple networks, they must all fit in
# the available IP address space. The default is 256.
#
# Related options:
#
# * ``use_neutron``
# * ``num_networks``
# (integer value)
# Minimum value: 1
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#network_size = 256
# DEPRECATED:
# This option determines the fixed IPv6 address block when creating a network.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any valid IPv6 CIDR
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#fixed_range_v6 = fd00::/48
# DEPRECATED:
# This is the default IPv4 gateway. It is used only in the testing suite.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any valid IP address.
#
# Related options:
#
# * ``use_neutron``
# * ``gateway_v6``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#gateway = <None>
# DEPRECATED:
# This is the default IPv6 gateway. It is used only in the testing suite.
#
# Please note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Possible values:
#
# * Any valid IP address.
#
# Related options:
#
# * ``use_neutron``
# * ``gateway``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#gateway_v6 = <None>
# DEPRECATED:
# This option represents the number of IP addresses to reserve at the top of the
# address range for VPN clients. It also will be ignored if the configuration
# option for `network_manager` is not set to the default of
# 'nova.network.manager.VlanManager'.
#
# Possible values:
#
# * Any integer, 0 or greater.
#
# Related options:
#
# * ``use_neutron``
# * ``network_manager``
# (integer value)
# Minimum value: 0
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#cnt_vpn_clients = 0
# DEPRECATED:
# This is the number of seconds to wait before disassociating a deallocated
# fixed IP address. This is only used with the nova-network service, and has
# no effect when using neutron for networking.
#
# Possible values:
#
# * Any integer, zero or greater.
#
# Related options:
#
# * ``use_neutron``
# (integer value)
# Minimum value: 0
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#fixed_ip_disassociate_timeout = 600
# DEPRECATED:
# This option determines how many times nova-network will attempt to create a
# unique MAC address before giving up and raising a
# `VirtualInterfaceMacAddressException` error.
#
# Possible values:
#
# * Any positive integer. The default is 5.
#
# Related options:
#
# * ``use_neutron``
# (integer value)
# Minimum value: 1
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#create_unique_mac_address_attempts = 5
# DEPRECATED:
# Determines whether unused gateway devices, both VLAN and bridge, are deleted
# if the network is in nova-network VLAN mode and is multi-hosted.
#
# Related options:
#
# * ``use_neutron``
# * ``vpn_ip``
# * ``fake_network``
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#teardown_unused_network_gateway = false
# DEPRECATED:
# When this option is True, a call is made to release the DHCP lease for the
# instance when that instance is terminated.
#
# Related options:
#
# * ``use_neutron``
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#force_dhcp_release = true
# DEPRECATED:
# When this option is True, whenever a DNS entry must be updated, a fanout cast
# message is sent to all network hosts to update their DNS entries in multi-host
# mode.
#
# Related options:
#
# * ``use_neutron``
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#update_dns_entries = false
# DEPRECATED:
# This option determines the time, in seconds, to wait between refreshing DNS
# entries for the network.
#
# Possible values:
#
# * A positive integer
# * -1 to disable updates
#
# Related options:
#
# * ``use_neutron``
# (integer value)
# Minimum value: -1
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dns_update_periodic_interval = -1
# DEPRECATED:
# This option allows you to specify the domain for the DHCP server.
#
# Possible values:
#
# * Any string that is a valid domain name.
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#dhcp_domain = novalocal
# DEPRECATED:
# This option allows you to specify the L3 management library to be used.
#
# Possible values:
#
# * Any dot-separated string that represents the import path to an L3 networking
# library.
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#l3_lib = nova.network.l3.LinuxNetL3
# DEPRECATED:
# THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
#
# If True in multi_host mode, all compute hosts share the same dhcp address.
# The same IP address used for DHCP will be added on each nova-network node,
# and it is only visible to the VMs on the same host.
#
# The use of this configuration has been deprecated and may be removed in any
# release after Mitaka. It is recommended that instead of relying on this
# option, an explicit value should be passed to 'create_networks()' as a
# keyword argument with the name 'share_address'.
# (boolean value)
# This option is deprecated for removal since 2014.2.
# Its value may be silently ignored in the future.
#share_dhcp_address = false
# DEPRECATED:
# URL for LDAP server which will store DNS entries
#
# Possible values:
#
# * A valid LDAP URL representing the server
# (uri value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_url = ldap://ldap.example.com:389
# DEPRECATED: Bind user for LDAP server (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_user = uid=admin,ou=people,dc=example,dc=org
# DEPRECATED: Bind user's password for LDAP server (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_password = password
# DEPRECATED:
# Hostmaster for LDAP DNS driver Start of Authority
#
# Possible values:
#
# * Any valid string representing LDAP DNS hostmaster.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_soa_hostmaster = hostmaster@example.org
# DEPRECATED:
# DNS Servers for LDAP DNS driver
#
# Possible values:
#
# * A valid URL representing a DNS server
# (multi valued)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_servers = dns.example.org
# DEPRECATED:
# Base distinguished name for the LDAP search query
#
# This option helps to decide where to look up the host in LDAP.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_base_dn = ou=hosts,dc=example,dc=org
# DEPRECATED:
# Refresh interval (in seconds) for LDAP DNS driver Start of Authority
#
# Time interval that a secondary/slave DNS server waits before requesting the
# primary DNS server's current SOA record. If the records are different, the
# secondary DNS server will request a zone transfer from the primary.
#
# NOTE: Lower values would cause more traffic.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_soa_refresh = 1800
# DEPRECATED:
# Retry interval (in seconds) for LDAP DNS driver Start of Authority
#
# Time interval that a secondary/slave DNS server should wait if an
# attempt to transfer the zone failed during the previous refresh interval.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_soa_retry = 3600
# DEPRECATED:
# Expiry interval (in seconds) for LDAP DNS driver Start of Authority
#
# Time interval that a secondary/slave DNS server holds the information
# before it is no longer considered authoritative.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_soa_expiry = 86400
# DEPRECATED:
# Minimum interval (in seconds) for LDAP DNS driver Start of Authority
#
# This is the minimum time-to-live that applies to all resource records in the
# zone file. This value tells other servers how long they should keep the
# data in cache.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ldap_dns_soa_minimum = 7200
# DEPRECATED:
# Default value for multi_host in networks.
#
# The nova-network service can operate in a multi-host or single-host mode.
# In multi-host mode each compute node runs a copy of nova-network and the
# instances on that compute node use the compute node as a gateway to the
# Internet. Whereas in single-host mode, a central server runs the
# nova-network service. All compute nodes forward traffic from the instances
# to the cloud controller, which then forwards traffic to the Internet.
#
# If this option is set to true, some RPC network calls will be sent directly
# to the host.
#
# Note that this option is only used when using nova-network instead of
# Neutron in your deployment.
#
# Related options:
#
# * ``use_neutron``
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#multi_host = false
# DEPRECATED:
# Driver to use for network creation.
#
# Network driver initializes (creates bridges and so on) only when the
# first VM lands on a host node. All network managers configure the
# network using network drivers. The driver is not tied to any particular
# network manager.
#
# The default Linux driver implements vlans, bridges, and iptables rules
# using linux utilities.
#
# Note that this option is only used when using nova-network instead
# of Neutron in your deployment.
#
# Related options:
#
# * ``use_neutron``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#network_driver = nova.network.linux_net
# DEPRECATED:
# Firewall driver to use with ``nova-network`` service.
#
# This option only applies when using the ``nova-network`` service. When using
# another networking service, such as Neutron, this should be set to
# ``nova.virt.firewall.NoopFirewallDriver``.
#
# Possible values:
#
# * ``nova.virt.firewall.IptablesFirewallDriver``
# * ``nova.virt.firewall.NoopFirewallDriver``
# * ``nova.virt.libvirt.firewall.IptablesFirewallDriver``
# * [...]
#
# Related options:
#
# * ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
# networking
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#firewall_driver = nova.virt.firewall.NoopFirewallDriver
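#
# For illustration only (a sketch based on the guidance above, not a
# recommendation). A (deprecated) nova-network deployment selecting the
# libvirt iptables driver would pair it with use_neutron = false, while
# Neutron deployments keep the Noop driver:
#
#   use_neutron = false
#   firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver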
# DEPRECATED:
# Determine whether to allow network traffic from same network.
#
# When set to true, hosts on the same subnet are not filtered and are allowed
# to pass all types of traffic between them. On a flat network, this allows
# all instances from all projects unfiltered communication. With VLAN
# networking, this allows access between instances within the same project.
#
# This option only applies when using the ``nova-network`` service. When using
# another networking service, such as Neutron, security groups or other
# approaches should be used.
#
# Possible values:
#
# * True: Network traffic should be allowed to pass between all instances on
# the same network, regardless of their tenant and security policies
# * False: Network traffic should not be allowed to pass between instances
# unless it is unblocked in a security group
#
# Related options:
#
# * ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
# networking
# * ``firewall_driver``: This must be set to
# ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure the
# libvirt firewall driver is enabled.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#allow_same_net_traffic = true
# DEPRECATED:
# Default pool for floating IPs.
#
# This option specifies the default floating IP pool for allocating floating
# IPs.
#
# While allocating a floating IP, users can optionally pass in the name of the
# pool they want to allocate from; otherwise it will be pulled from the
# default pool.
#
# If this option is not set, then 'nova' is used as the default floating pool.
#
# Possible values:
#
# * Any string representing a floating IP pool name
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This option was used for two purposes: to set the floating IP pool name for
# nova-network and to do the same for neutron. nova-network is deprecated, as
# are any related configuration options. Users of neutron, meanwhile, should
# use the 'default_floating_pool' option in the '[neutron]' group.
#default_floating_pool = nova
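#
# For illustration only. As the deprecation note above suggests, Neutron
# users should configure the pool in the '[neutron]' group instead; 'public'
# below is a hypothetical pool name:
#
#   [neutron]
#   default_floating_pool = public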
# DEPRECATED:
# Autoassigning floating IP to VM
#
# When set to True, a floating IP is automatically allocated and associated
# with the VM upon creation.
#
# Related options:
#
# * use_neutron: this option only works with nova-network.
# (boolean value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#auto_assign_floating_ip = false
# DEPRECATED:
# Full class name for the DNS Manager for floating IPs.
#
# This option specifies the class of the driver that provides functionality
# to manage DNS entries associated with floating IPs.
#
# When a user adds a DNS entry for a specified domain to a floating IP,
# nova will add a DNS entry using the specified floating DNS driver.
# When a floating IP is deallocated, its DNS entry will automatically be
# deleted.
#
# Possible values:
#
# * Full Python path to the class to be used
#
# Related options:
#
# * use_neutron: this option only works with nova-network.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#floating_ip_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
# DEPRECATED:
# Full class name for the DNS Manager for instance IPs.
#
# This option specifies the class of the driver that provides functionality
# to manage DNS entries for instances.
#
# On instance creation, nova will add DNS entries for the instance name and
# id, using the specified instance DNS driver and domain. On instance deletion,
# nova will remove the DNS entries.
#
# Possible values:
#
# * Full Python path to the class to be used
#
# Related options:
#
# * use_neutron: this option only works with nova-network.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#instance_dns_manager = nova.network.noop_dns_driver.NoopDNSDriver
# DEPRECATED:
# If specified, Nova checks if the availability_zone of every instance matches
# what the database says the availability_zone should be for the specified
# dns_domain.
#
# Related options:
#
# * use_neutron: this option only works with nova-network.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#instance_dns_domain =
# DEPRECATED:
# Assign IPv6 and IPv4 addresses when creating instances.
#
# Related options:
#
# * use_neutron: this only works with nova-network.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#use_ipv6 = false
# DEPRECATED:
# Abstracts out IPv6 address generation to pluggable backends.
#
# nova-network can be put into dual-stack mode, so that it uses
# both IPv4 and IPv6 addresses. In dual-stack mode, by default, instances
# acquire IPv6 global unicast addresses with the help of the stateless address
# autoconfiguration mechanism.
#
# Related options:
#
# * use_neutron: this option only works with nova-network.
# * use_ipv6: this option only works if ipv6 is enabled for nova-network.
# (string value)
# Possible values:
# rfc2462 - <No description provided>
# account_identifier - <No description provided>
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#ipv6_backend = rfc2462
# DEPRECATED:
# This option is used to enable or disable quota checking for tenant networks.
#
# Related options:
#
# * quota_networks
# (boolean value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason:
# CRUD operations on tenant networks are only available when using nova-network
# and nova-network is itself deprecated.
#enable_network_quota = false
# DEPRECATED:
# This option controls the number of private networks that can be created per
# project (or per tenant).
#
# Related options:
#
# * enable_network_quota
# (integer value)
# Minimum value: 0
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason:
# CRUD operations on tenant networks are only available when using nova-network
# and nova-network is itself deprecated.
#quota_networks = 3
#
# Filename that will be used for storing websocket frames received
# and sent by a proxy service (like VNC, spice, serial) running on this host.
# If this is not set, no recording will be done.
# (string value)
#record = <None>
# Run as a background process. (boolean value)
#daemon = false
# Disallow non-encrypted connections. (boolean value)
#ssl_only = false
# Set to True if source host is addressed with IPv6. (boolean value)
#source_is_ipv6 = false
# Path to SSL certificate file. (string value)
#cert = self.pem
# SSL key file (if separate from cert). (string value)
#key = <None>
#
# Path to directory with content which will be served by a web server.
# (string value)
#web = /usr/share/spice-html5
#
# The directory where the Nova python modules are installed.
#
# This directory is used to store template files for networking and remote
# console access. It is also the default path for other config options which
# need to persist Nova internal data. It is very unlikely that you need to
# change this option from its default value.
#
# Possible values:
#
# * The full path to a directory.
#
# Related options:
#
# * ``state_path``
# (string value)
#pybasedir = /home/zuul/.venv/lib/python3.5/site-packages
#
# The directory where the Nova binaries are installed.
#
# This option is only relevant if the networking capabilities from Nova are
# used (see services below). Nova's networking capabilities are targeted to
# be fully replaced by Neutron in the future. It is very unlikely that you need
# to change this option from its default value.
#
# Possible values:
#
# * The full path to a directory.
# (string value)
#bindir = /home/zuul/.venv/local/bin
#
# The top-level directory for maintaining Nova's state.
#
# This directory is used to store Nova's internal state. It is used by a
# variety of other config options which derive from this. In some scenarios
# (for example migrations) it makes sense to use a storage location which is
# shared between multiple compute hosts (for example via NFS). Unless the
# option ``instances_path`` gets overwritten, this directory can grow very
# large.
#
# Possible values:
#
# * The full path to a directory. Defaults to value provided in ``pybasedir``.
# (string value)
#state_path = $pybasedir
#
# Number of seconds indicating how frequently the state of services on a
# given hypervisor is reported. Nova needs to know this to determine the
# overall health of the deployment.
#
# Related Options:
#
# * service_down_time
# report_interval should be less than service_down_time. If service_down_time
# is less than report_interval, services will routinely be considered down,
# because they report in too rarely.
# (integer value)
#report_interval = 10
#
# Maximum time in seconds since last check-in for up service
#
# Each compute node periodically updates its database status based on the
# specified report interval. If the compute node hasn't updated the status
# for more than service_down_time, then the compute node is considered down.
#
# Related Options:
#
# * report_interval (service_down_time should not be less than report_interval)
# (integer value)
#service_down_time = 60
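#
# For illustration only (hypothetical values, not defaults). Keeping
# report_interval well below service_down_time, as advised above, a
# deployment that reports every 30 seconds might use:
#
#   report_interval = 30
#   service_down_time = 180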
#
# Enable periodic tasks.
#
# If set to true, this option allows services to periodically run tasks
# on the manager.
#
# In case of running multiple schedulers or conductors you may want to run
# periodic tasks on only one host - in this case disable this option for all
# hosts but one.
# (boolean value)
#periodic_enable = true
#
# Number of seconds to randomly delay when starting the periodic task
# scheduler to reduce stampeding.
#
# When compute workers are restarted in unison across a cluster,
# they all end up running the periodic tasks at the same time
# causing problems for the external services. To mitigate this
# behavior, periodic_fuzzy_delay option allows you to introduce a
# random initial delay when starting the periodic task scheduler.
#
# Possible Values:
#
# * Any positive integer (in seconds)
# * 0 : disable the random delay
# (integer value)
# Minimum value: 0
#periodic_fuzzy_delay = 60
# List of APIs to be enabled by default. (list value)
#enabled_apis = osapi_compute,metadata
#
# List of APIs with enabled SSL.
#
# Nova provides SSL support for the API servers. The enabled_ssl_apis option
# allows configuring SSL support.
# (list value)
#enabled_ssl_apis =
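#
# For illustration only (a minimal sketch, not a recommendation). Enabling
# SSL for the compute API only; the API name comes from the enabled_apis
# default above, and certificate configuration is deployment specific and not
# shown here:
#
#   enabled_ssl_apis = osapi_compute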
#
# IP address on which the OpenStack API will listen.
#
# The OpenStack API service listens on this IP address for incoming
# requests.
# (string value)
#osapi_compute_listen = 0.0.0.0
#
# Port on which the OpenStack API will listen.
#
# The OpenStack API service listens on this port number for incoming
# requests.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#osapi_compute_listen_port = 8774
#
# Number of workers for OpenStack API service. The default will be the number
# of CPUs available.
#
# OpenStack API services can be configured to run as multi-process (workers).
# This overcomes the problem of reduction in throughput when API request
# concurrency increases. OpenStack API service will run in the specified
# number of processes.
#
# Possible Values:
#
# * Any positive integer
# * None (default value)
# (integer value)
# Minimum value: 1
#osapi_compute_workers = <None>
#
# IP address on which the metadata API will listen.
#
# The metadata API service listens on this IP address for incoming
# requests.
# (string value)
#metadata_listen = 0.0.0.0
#
# Port on which the metadata API will listen.
#
# The metadata API service listens on this port number for incoming
# requests.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#metadata_listen_port = 8775
#
# Number of workers for the metadata service. If not specified, the number of
# available CPUs will be used.
#
# The metadata service can be configured to run as multi-process (workers).
# This overcomes the problem of reduction in throughput when API request
# concurrency increases. The metadata service will run in the specified
# number of processes.
#
# Possible Values:
#
# * Any positive integer
# * None (default value)
# (integer value)
# Minimum value: 1
#metadata_workers = <None>
# Full class name for the network manager. (string value)
# Possible values:
# nova.network.manager.FlatManager - <No description provided>
# nova.network.manager.FlatDHCPManager - <No description provided>
# nova.network.manager.VlanManager - <No description provided>
#network_manager = nova.network.manager.VlanManager
#
# This option specifies the driver to be used for the servicegroup service.
#
# ServiceGroup API in nova enables checking status of a compute node. When a
# compute worker running the nova-compute daemon starts, it calls the join API
# to join the compute group. Services like nova scheduler can query the
# ServiceGroup API to check if a node is alive. Internally, the ServiceGroup
# client driver automatically updates the compute worker status. There are
# multiple backend implementations for this service: Database ServiceGroup
# driver and Memcache ServiceGroup driver.
#
# Possible Values:
#
# * db : Database ServiceGroup driver
# * mc : Memcache ServiceGroup driver
#
# Related Options:
#
# * service_down_time (maximum time since last check-in for up service)
# (string value)
# Possible values:
# db - <No description provided>
# mc - <No description provided>
#servicegroup_driver = db
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses a logging handler designed to watch the file system. When the log file
# is moved or removed, this handler will open a new log file with the
# specified path instantaneously. It makes sense only if the log_file option
# is specified and the Linux platform is used. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append is
# set. (boolean value)
#use_syslog = false
# Enable journald for logging. If running in a systemd environment you may wish
# to enable journal support. Doing so will use the journal native protocol which
# includes structured metadata in addition to log messages. This option is
# ignored if log_config_append is set. (boolean value)
#use_journal = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Use JSON formatting for logging. This option is ignored if log_config_append
# is set. (boolean value)
#use_json = false
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message is
# DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Interval, in seconds, of log rate limiting. (integer value)
#rate_limit_interval = 0
# Maximum number of logged messages per rate_limit_interval. (integer value)
#rate_limit_burst = 0
# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or
# empty string. Logs with level greater or equal to rate_limit_except_level are
# not filtered. An empty string means that all levels are filtered. (string
# value)
#rate_limit_except_level = CRITICAL
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
#
# From oslo.messaging
#
# Size of RPC connection pool. (integer value)
#rpc_conn_pool_size = 30
# The pool size limit for connections expiration policy (integer value)
#conn_pool_min_size = 2
# The time-to-live in sec of idle connections in the pool (integer value)
#conn_pool_ttl = 1200
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address. (string value)
#rpc_zmq_bind_address = *
# MatchMaker driver. (string value)
# Possible values:
# redis - <No description provided>
# sentinel - <No description provided>
# dummy - <No description provided>
#rpc_zmq_matchmaker = redis
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts = 1
# Maximum number of ingress messages to locally buffer per topic. Default is
# unlimited. (integer value)
#rpc_zmq_topic_backlog = <None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir = /var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
# "host" option, if running Nova. (string value)
#rpc_zmq_host = localhost
# Number of seconds to wait before all pending messages will be sent after
# closing a socket. The default value of -1 specifies an infinite linger period.
# The value of 0 specifies no linger period. Pending messages shall be discarded
# immediately when the socket is closed. Positive values specify an upper bound
# for the linger period. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
#zmq_linger = -1
# The default number of seconds that poll should wait. Poll raises timeout
# exception when timeout expired. (integer value)
#rpc_poll_timeout = 1
# Expiration timeout in seconds of a name service record about existing target (
# < 0 means no timeout). (integer value)
#zmq_target_expire = 300
# Update period in seconds of a name service record about existing target.
# (integer value)
#zmq_target_update = 180
# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
# value)
#use_pub_sub = false
# Use ROUTER remote proxy. (boolean value)
#use_router_proxy = false
# This option makes direct connections dynamic or static. It makes sense only
# with use_router_proxy=False which means to use direct connections for direct
# message types (ignored otherwise). (boolean value)
#use_dynamic_connections = false
# How many additional connections to a host will be made for failover reasons.
# This option only applies in dynamic connections mode. (integer value)
#zmq_failover_connections = 2
# Minimum port number for the random ports range. (port value)
# Minimum value: 0
# Maximum value: 65535
#rpc_zmq_min_port = 49153
# Maximum port number for the random ports range. (integer value)
# Minimum value: 1
# Maximum value: 65536
#rpc_zmq_max_port = 65536
# Number of retries to find free port number before fail with ZMQBindError.
# (integer value)
#rpc_zmq_bind_port_retries = 100
# Default serialization mechanism for serializing/deserializing
# outgoing/incoming messages (string value)
# Possible values:
# json - <No description provided>
# msgpack - <No description provided>
#rpc_zmq_serialization = json
# This option configures round-robin mode in the zmq socket. True means not
# keeping a queue when the server side disconnects. False means to keep the
# queue and messages even if the server is disconnected; when the server
# reappears, all accumulated messages are sent to it. (boolean value)
#zmq_immediate = true
# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
# other negative value) means to skip any overrides and leave it to OS default;
# 0 and 1 (or any other positive value) mean to disable and enable the option
# respectively. (integer value)
#zmq_tcp_keepalive = -1
# The duration between two keepalive transmissions in idle condition. The unit
# is platform dependent, for example, seconds in Linux, milliseconds in Windows
# etc. The default value of -1 (or any other negative value and 0) means to skip
# any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_idle = -1
# The number of retransmissions to be carried out before declaring that remote
# end is not available. The default value of -1 (or any other negative value and
# 0) means to skip any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_cnt = -1
# The duration between two successive keepalive retransmissions, if
# acknowledgement to the previous keepalive transmission is not received. The
# unit is platform dependent, for example, seconds in Linux, milliseconds in
# Windows etc. The default value of -1 (or any other negative value and 0) means
# to skip any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_intvl = -1
# Maximum number of (green) threads to work concurrently. (integer value)
#rpc_thread_pool_size = 100
# Expiration timeout in seconds of a sent/received message after which it is not
# tracked anymore by a client/server. (integer value)
#rpc_message_ttl = 300
# Wait for message acknowledgements from receivers. This mechanism works only
# via proxy without PUB/SUB. (boolean value)
#rpc_use_acks = false
# Number of seconds to wait for an ack from a cast/call. After each retry
# attempt this timeout is multiplied by some specified multiplier. (integer
# value)
#rpc_ack_timeout_base = 15
# Number to multiply base ack timeout by after each retry attempt. (integer
# value)
#rpc_ack_timeout_multiplier = 2
# Default number of message sending attempts in case any problems occur:
# positive value N means at most N retries, 0 means no retries, None or -1 (or
# any other negative values) mean to retry forever. This option is used only if
# acknowledgments are enabled. (integer value)
#rpc_retry_attempts = 3
# List of publisher hosts SubConsumer can subscribe on. This option has higher
# priority than the default publishers list taken from the matchmaker. (list
# value)
#subscribe_on =
# Size of executor thread pool when executor is threading or eventlet. (integer
# value)
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
#executor_thread_pool_size = 64
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout = 60
# The network address and optional user credentials for connecting to the
# messaging backend, in URL format. The expected format is:
#
# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
#
# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
#
# For full details on the fields in the URL see the documentation of
# oslo_messaging.TransportURL at
# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
# (string value)
#transport_url = <None>
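#
# For illustration only, using the URL format documented above with
# hypothetical hosts and credentials: a two-node RabbitMQ cluster and a
# 'nova' virtual host might be expressed as:
#
#   transport_url = rabbit://user:pass@rabbit1:5672,user:pass@rabbit2:5672/nova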
# DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers
# include amqp and zmq. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rpc_backend = rabbit
# The default exchange under which topics are scoped. May be overridden by an
# exchange name specified in the transport_url option. (string value)
#control_exchange = openstack
#
# From oslo.service.periodic_task
#
# Some periodic tasks can be run in a separate process. Should we run them here?
# (boolean value)
#run_external_periodic_tasks = true
#
# From oslo.service.service
#
# Enable eventlet backdoor. Acceptable values are 0, <port>, and <start>:<end>,
# where 0 results in listening on a random tcp port number; <port> results in
# listening on the specified port number (and not enabling backdoor if that port
# is in use); and <start>:<end> results in listening on the smallest unused port
# number within the specified range of port numbers. The chosen port is
# displayed in the service's log file. (string value)
#backdoor_port = <None>
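#
# For illustration only (hypothetical port numbers), using the formats
# described above: either a fixed port, or the smallest unused port in a
# range:
#
#   backdoor_port = 4444
#   backdoor_port = 3000:3100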
# Enable eventlet backdoor, using the provided path as a unix socket that can
# receive connections. This option is mutually exclusive with 'backdoor_port' in
# that only one should be provided. If both are provided then the existence of
# this option overrides the usage of that option. (string value)
#backdoor_socket = <None>
# Enables or disables logging values of all registered options when starting a
# service (at DEBUG level). (boolean value)
#log_options = true
# Specify a timeout after which a gracefully shutdown server will exit. Zero
# value means endless wait. (integer value)
#graceful_shutdown_timeout = 60
[api]
#
# Options under this group are used to define Nova API.
#
# From nova.conf
#
#
# This determines the strategy to use for authentication: keystone or noauth2.
# 'noauth2' is designed for testing only, as it does no actual credential
# checking. 'noauth2' provides administrative credentials only if 'admin' is
# specified as the username.
# (string value)
# Possible values:
# keystone - <No description provided>
# noauth2 - <No description provided>
#auth_strategy = keystone
#
# When True, the 'X-Forwarded-For' header is treated as the canonical remote
# address. When False (the default), the 'remote_address' header is used.
#
# You should only enable this if you have a sanitizing proxy.
# (boolean value)
#use_forwarded_for = false
#
# When gathering the existing metadata for a config drive, the EC2-style
# metadata is returned for all versions that don't appear in this option.
# As of the Liberty release, the available versions are:
#
# * 1.0
# * 2007-01-19
# * 2007-03-01
# * 2007-08-29
# * 2007-10-10
# * 2007-12-15
# * 2008-02-01
# * 2008-09-01
# * 2009-04-04
#
# The option is in the format of a single string, with each version separated
# by a space.
#
# Possible values:
#
# * Any string that represents zero or more versions, separated by spaces.
# (string value)
#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
#
# A list of vendordata providers.
#
# vendordata providers are how deployers can provide metadata via configdrive
# and metadata that is specific to their deployment. There are currently two
# supported providers: StaticJSON and DynamicJSON.
#
# StaticJSON reads a JSON file configured by the flag vendordata_jsonfile_path
# and places the JSON from that file into vendor_data.json and
# vendor_data2.json.
#
# DynamicJSON is configured via the vendordata_dynamic_targets flag, which is
# documented separately. For each of the endpoints specified in that flag, a
# section is added to the vendor_data2.json.
#
# For more information on the requirements for implementing a vendordata
# dynamic endpoint, please see the vendordata.rst file in the nova developer
# reference.
#
# Possible values:
#
# * A list of vendordata providers, with StaticJSON and DynamicJSON being
# current options.
#
# Related options:
#
# * vendordata_dynamic_targets
# * vendordata_dynamic_ssl_certfile
# * vendordata_dynamic_connect_timeout
# * vendordata_dynamic_read_timeout
# * vendordata_dynamic_failure_fatal
# (list value)
#vendordata_providers = StaticJSON
#
# A list of targets for the dynamic vendordata provider. These targets are of
# the form <name>@<url>.
#
# The dynamic vendordata provider collects metadata by contacting external REST
# services and querying them for information about the instance. This behaviour
# is documented in the vendordata.rst file in the nova developer reference.
# (list value)
#vendordata_dynamic_targets =
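#
# For illustration only (hypothetical provider list, target name, and URL).
# Enabling both providers and pointing the dynamic provider at a single REST
# service, using the <name>@<url> form described above:
#
#   vendordata_providers = StaticJSON,DynamicJSON
#   vendordata_dynamic_targets = example@http://127.0.0.1:9999/vendordata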
#
# Path to an optional certificate file or CA bundle to verify dynamic
# vendordata REST services' SSL certificates against.
#
# Possible values:
#
# * An empty string, or a path to a valid certificate file
#
# Related options:
#
# * vendordata_providers
# * vendordata_dynamic_targets
# * vendordata_dynamic_connect_timeout
# * vendordata_dynamic_read_timeout
# * vendordata_dynamic_failure_fatal
# (string value)
#vendordata_dynamic_ssl_certfile =
#
# Maximum wait time for an external REST service to connect.
#
# Possible values:
#
# * Any integer with a value greater than three (the TCP packet retransmission
# timeout). Note that instance start may be blocked during this wait time,
# so this value should be kept small.
#
# Related options:
#
# * vendordata_providers
# * vendordata_dynamic_targets
# * vendordata_dynamic_ssl_certfile
# * vendordata_dynamic_read_timeout
# * vendordata_dynamic_failure_fatal
# (integer value)
# Minimum value: 3
#vendordata_dynamic_connect_timeout = 5
#
# Maximum wait time for an external REST service to return data once connected.
#
# Possible values:
#
# * Any integer. Note that instance start is blocked during this wait time,
# so this value should be kept small.
#
# Related options:
#
# * vendordata_providers
# * vendordata_dynamic_targets
# * vendordata_dynamic_ssl_certfile
# * vendordata_dynamic_connect_timeout
# * vendordata_dynamic_failure_fatal
# (integer value)
# Minimum value: 0
#vendordata_dynamic_read_timeout = 5
#
# Should failures to fetch dynamic vendordata be fatal to instance boot?
#
# Related options:
#
# * vendordata_providers
# * vendordata_dynamic_targets
# * vendordata_dynamic_ssl_certfile
# * vendordata_dynamic_connect_timeout
# * vendordata_dynamic_read_timeout
# (boolean value)
#vendordata_dynamic_failure_fatal = false
#
# This option is the time (in seconds) to cache metadata. When set to 0,
# metadata caching is disabled entirely; this is generally not recommended for
# performance reasons. Increasing this setting should improve response times
# of the metadata API when under heavy load. Higher values may increase memory
# usage, and result in longer times for host metadata changes to take effect.
# (integer value)
# Minimum value: 0
#metadata_cache_expiration = 15
#
# Cloud providers may store custom data in a vendor data file that will then be
# available to the instances via the metadata service, and to the rendering of
# config-drive. The default class for this, JsonFileVendorData, loads this
# information from a JSON file, whose path is configured by this option. If
# there is no path set by this option, the class returns an empty dictionary.
#
# Possible values:
#
# * Any string representing the path to the data file, or an empty string
# (default).
# (string value)
#vendordata_jsonfile_path = <None>
#
# As a query can potentially return many thousands of items, you can limit the
# maximum number of items in a single response by setting this option.
# (integer value)
# Minimum value: 0
# Deprecated group/name - [DEFAULT]/osapi_max_limit
#max_limit = 1000
#
# This string is prepended to the normal URL that is returned in links to the
# OpenStack Compute API. If it is empty (the default), the URLs are returned
# unchanged.
#
# Possible values:
#
# * Any string, including an empty string (the default).
# (string value)
# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
#compute_link_prefix = <None>
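#
# For illustration only (a hypothetical URL, not a default). A deployment
# that publishes the compute API behind a proxy could rewrite the returned
# links with:
#
#   compute_link_prefix = https://api.example.com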
#
# This string is prepended to the normal URL that is returned in links to
# Glance resources. If it is empty (the default), the URLs are returned
# unchanged.
#
# Possible values:
#
# * Any string, including an empty string (the default).
# (string value)
# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
#glance_link_prefix = <None>
# DEPRECATED:
# Operators can turn off the ability for a user to take snapshots of their
# instances by setting this option to False. When disabled, any attempt to
# take a snapshot will result in an HTTP 400 response ("Bad Request").
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: This option disables the createImage server action API in a non-
# discoverable way and is thus a barrier to interoperability. Also, it is not
# used for other APIs that create snapshots like shelve or createBackup.
# Disabling snapshots should be done via policy if so desired.
#allow_instance_snapshots = true
# DEPRECATED:
# This option is a list of all instance states for which network address
# information should not be returned from the API.
#
# Possible values:
#
# A list of strings, where each string is a valid VM state, as defined in
# nova/compute/vm_states.py. As of the Newton release, they are:
#
# * "active"
# * "building"
# * "paused"
# * "suspended"
# * "stopped"
# * "rescued"
# * "resized"
# * "soft-delete"
# * "deleted"
# * "error"
# * "shelved"
# * "shelved_offloaded"
# (list value)
# Deprecated group/name - [DEFAULT]/osapi_hide_server_address_states
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason: This option hides the server address in the server representation for
# the configured server states, which makes the behaviour of the GET server API
# dependent on this configuration option. As a result, users cannot discover
# the API behaviour across different clouds, which leads to interoperability
# issues.
#hide_server_address_states = building
# The full path to the fping binary. (string value)
#fping_path = /usr/sbin/fping
#
# When True, the TenantNetworkController will query the Neutron API to get the
# default networks to use.
#
# Related options:
#
# * neutron_default_tenant_id
# (boolean value)
#use_neutron_default_nets = false
#
# Tenant ID (also referred to in some places as the 'project ID') to use when
# getting the default network from the Neutron API.
#
# Related options:
#
# * use_neutron_default_nets
# (string value)
#neutron_default_tenant_id = default
#
# Enables returning of the instance password by the relevant server API calls
# such as create, rebuild, evacuate, or rescue. If the hypervisor does not
# support password injection, the returned password will not be correct, so set
# this option to False in that case.
# (boolean value)
#enable_instance_password = true
[api_database]
#
# The *Nova API Database* is a separate database which is used for information
# which is used across *cells*. This database is mandatory since the Mitaka
# release (13.0.0).
#
# From nova.conf
#
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
#connection = <None>
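#
# A typical example (hostname and credentials are hypothetical):
# connection = mysql+pymysql://nova:secret@controller/nova_api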
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous = true
# The SQLAlchemy connection string to use to connect to the slave database.
# (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including the
# default, overrides any server-set SQL mode. To use whatever SQL mode is set by
# the server configuration, set this to no value. Example: mysql_sql_mode=
# (string value)
#mysql_sql_mode = TRADITIONAL
# Connections which have been present in the connection pool longer than this
# number of seconds will be replaced with a new one the next time they are
# checked out from the pool. (integer value)
# Deprecated group/name - [api_database]/idle_timeout
#connection_recycle_time = 3600
# Maximum number of SQL connections to keep open in a pool. Setting a value of 0
# indicates no limit. (integer value)
#max_pool_size = <None>
# Maximum number of database connection retries during startup. Set to -1 to
# specify an infinite retry count. (integer value)
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer value)
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer value)
#max_overflow = <None>
# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
# value)
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
#pool_timeout = <None>
[barbican]
#
# From nova.conf
#
# Use this endpoint to connect to Barbican, for example:
# "http://localhost:9311/" (string value)
#barbican_endpoint = <None>
# Version of the Barbican API, for example: "v1" (string value)
#barbican_api_version = <None>
# Use this endpoint to connect to Keystone (string value)
# Deprecated group/name - [key_manager]/auth_url
#auth_endpoint = http://localhost/identity/v3
# Number of seconds to wait before retrying poll for key creation completion
# (integer value)
#retry_delay = 1
# Number of times to retry poll for key creation completion (integer value)
#number_of_retries = 60
# Specifies whether to verify TLS (https) requests. If False, the server's
# certificate will not be validated (boolean value)
#verify_ssl = true
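#
# A minimal illustrative example (the endpoint below is hypothetical):
#
# barbican_endpoint = http://controller:9311/
# barbican_api_version = v1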
[cache]
#
# From nova.conf
#
# Prefix for building the configuration dictionary for the cache region. This
# should not need to be changed unless there is another dogpile.cache region
# with the same configuration name. (string value)
#config_prefix = cache.oslo
# Default TTL, in seconds, for any cached item in the dogpile.cache region. This
# applies to any cached method that doesn't have an explicit cache expiration
# time defined for it. (integer value)
#expiration_time = 600
# Cache backend module. For eventlet-based environments or environments with
# hundreds of threaded servers, Memcache with pooling (oslo_cache.memcache_pool)
# is recommended. For environments with fewer than 100 threaded servers,
# Memcached (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is
# recommended. Test
# environments with a single instance of the server can use the
# dogpile.cache.memory backend. (string value)
# Possible values:
# oslo_cache.memcache_pool - <No description provided>
# oslo_cache.dict - <No description provided>
# oslo_cache.mongo - <No description provided>
# oslo_cache.etcd3gw - <No description provided>
# dogpile.cache.memcached - <No description provided>
# dogpile.cache.pylibmc - <No description provided>
# dogpile.cache.bmemcached - <No description provided>
# dogpile.cache.dbm - <No description provided>
# dogpile.cache.redis - <No description provided>
# dogpile.cache.memory - <No description provided>
# dogpile.cache.memory_pickle - <No description provided>
# dogpile.cache.null - <No description provided>
#backend = dogpile.cache.null
# Arguments supplied to the backend module. Specify this option once per
# argument to be passed to the dogpile.cache backend. Example format:
# "<argname>:<value>". (multi valued)
#backend_argument =
# Proxy classes to import that will affect the way the dogpile.cache backend
# functions. See the dogpile.cache documentation on changing-backend-behavior.
# (list value)
#proxies =
# Global toggle for caching. (boolean value)
#enabled = false
# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls).
# This is only really useful if you need to see the specific cache-backend
# get/set/delete calls with the keys/values. Typically this should be left set
# to false. (boolean value)
#debug_cache_backend = false
# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
# oslo_cache.memcache_pool backends only). (list value)
#memcache_servers = localhost:11211
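#
# A common illustrative configuration (hostnames are hypothetical) that
# enables caching with the pooled memcache backend:
#
# enabled = true
# backend = oslo_cache.memcache_pool
# memcache_servers = controller1:11211,controller2:11211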
# Number of seconds memcached server is considered dead before it is tried
# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
# (integer value)
#memcache_dead_retry = 300
# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
# oslo_cache.memcache_pool backends only). (integer value)
#memcache_socket_timeout = 3
# Max total number of open connections to every memcached server.
# (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_maxsize = 10
# Number of seconds a connection to memcached is held unused in the pool before
# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_unused_timeout = 60
# Number of seconds that an operation will wait to get a memcache client
# connection. (integer value)
#memcache_pool_connection_get_timeout = 10
[cells]
#
# DEPRECATED: Cells options allow you to use cells v1 functionality in an
# OpenStack deployment.
#
# Note that the options in this group are only for cells v1 functionality, which
# is considered experimental and not recommended for new deployments. Cells v1
# is being replaced with cells v2, which is required starting in the 15.0.0
# Ocata release; all Nova deployments will be at least a cells v2 cell of one.
#
#
# From nova.conf
#
# DEPRECATED:
# Enable cell v1 functionality.
#
# Note that cells v1 is considered experimental and not recommended for new
# Nova deployments. Cells v1 is being replaced by cells v2; starting in the
# 15.0.0 Ocata release, all Nova deployments are at least a cells v2 cell
# of one. Setting this option, or any other options in the [cells] group, is
# not required for cells v2.
#
# When this functionality is enabled, it lets you scale an OpenStack
# Compute cloud in a more distributed fashion without having to use
# complicated technologies like database and message queue clustering.
# Cells are configured as a tree. The top-level cell should have a host
# that runs a nova-api service, but no nova-compute services. Each
# child cell should run all of the typical nova-* services in a regular
# Compute cloud except for nova-api. You can think of cells as a normal
# Compute deployment in that each cell has its own database server and
# message queue broker.
#
# Related options:
#
# * name: A unique cell name must be given when this functionality
# is enabled.
# * cell_type: Cell type should be defined for all cells.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#enable = false
# DEPRECATED:
# Name of the current cell.
#
# This value must be unique for each cell. The name of a cell is used as
# its ID; leaving this option unset or setting the same name for
# two or more cells may cause unexpected behaviour.
#
# Related options:
#
# * enable: This option is meaningful only when the cells service
# is enabled
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#name = nova
# DEPRECATED:
# Cell capabilities.
#
# List of arbitrary key=value pairs defining capabilities of the
# current cell to be sent to the parent cells. These capabilities
# are intended to be used in cells scheduler filters/weighers.
#
# Possible values:
#
# * key=value pairs list, for example:
# ``hypervisor=xenserver;kvm,os=linux;windows``
# (list value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#capabilities = hypervisor=xenserver;kvm,os=linux;windows
# DEPRECATED:
# Call timeout.
#
# The cell messaging module waits for response(s) to be put into the
# eventlet queue. This option defines the number of seconds to wait for a
# response from a call to a cell.
#
# Possible values:
#
# * An integer, corresponding to the interval time in seconds.
# (integer value)
# Minimum value: 0
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#call_timeout = 60
# DEPRECATED:
# Reserve percentage
#
# Percentage of cell capacity to hold in reserve, so the minimum
# amount of free resource is considered to be:
#
# min_free = total * (reserve_percent / 100.0)
#
# This option affects both memory and disk utilization.
#
# The primary purpose of this reserve is to ensure some space is
# available for users who want to resize their instance to be larger.
# Note that currently once the capacity expands into this reserve
# space this option is ignored.
#
# Possible values:
#
# * An integer or float, corresponding to the percentage of cell capacity to
# be held in reserve.
# (floating point value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#reserve_percent = 10.0
# DEPRECATED:
# Type of cell.
#
# When the cells feature is enabled, the hosts in the OpenStack Compute
# cloud are partitioned into groups. Cells are configured as a tree.
# The top-level cell's cell_type must be set to ``api``. All other
# cells are defined as ``compute`` cells by default.
#
# Related option:
#
# * quota_driver: Disable quota checking for the child cells.
# (nova.quota.NoopQuotaDriver)
# (string value)
# Possible values:
# api - <No description provided>
# compute - <No description provided>
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#cell_type = compute
# DEPRECATED:
# Mute child interval.
#
# Number of seconds without a capability and capacity update after which a
# child cell is treated as a mute cell. The child cell will then be weighed
# such that it is strongly recommended to be skipped.
#
# Possible values:
#
# * An integer, corresponding to the interval time in seconds.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#mute_child_interval = 300
# DEPRECATED:
# Bandwidth update interval.
#
# Seconds between bandwidth usage cache updates for cells.
#
# Possible values:
#
# * An integer, corresponding to the interval time in seconds.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#bandwidth_update_interval = 600
# DEPRECATED:
# Instance update sync database limit.
#
# Number of instances to pull from the database at one time for
# a sync. If there are more instances to update, the results will
# be paged through.
#
# Possible values:
#
# * An integer, corresponding to a number of instances.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#instance_update_sync_database_limit = 100
# DEPRECATED:
# Mute weight multiplier.
#
# Multiplier used to weigh mute children. Mute child cells are
# recommended to be skipped, so their weight is multiplied by this
# negative value.
#
# Possible values:
#
# * Negative numeric number
# (floating point value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#mute_weight_multiplier = -10000.0
# DEPRECATED:
# Ram weight multiplier.
#
# Multiplier used for weighing ram. Negative numbers indicate that
# Compute should stack VMs on one host instead of spreading out new
# VMs to more hosts in the cell.
#
# Possible values:
#
# * Numeric multiplier
# (floating point value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#ram_weight_multiplier = 10.0
# DEPRECATED:
# Offset weight multiplier
#
# Multiplier applied to the offset weigher. Cells with higher
# weight_offsets in the DB will be preferred. The weight_offset
# is a property of a cell stored in the database. It can be used
# by a deployer to have scheduling decisions favor or disfavor
# cells based on the setting.
#
# Possible values:
#
# * Numeric multiplier
# (floating point value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#offset_weight_multiplier = 1.0
# DEPRECATED:
# Instance updated at threshold
#
# Number of seconds after an instance was updated or deleted during which
# cells should continue to be updated. This option lets the cells manager
# only attempt to sync instances that have been updated recently,
# i.e. a threshold of 3600 means to only update instances that
# have been modified in the last hour.
#
# Possible values:
#
# * Threshold in seconds
#
# Related options:
#
# * This value is used with the ``instance_update_num_instances``
# value in a periodic task run.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#instance_updated_at_threshold = 3600
# DEPRECATED:
# Instance update num instances
#
# On every run of the periodic task, the nova cells manager will attempt to
# sync up to this number of instances. When the
# manager gets the list of instances, it shuffles them so that multiple
# nova-cells services do not attempt to sync the same instances in
# lockstep.
#
# Possible values:
#
# * Positive integer number
#
# Related options:
#
# * This value is used with the ``instance_updated_at_threshold``
# value in a periodic task run.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#instance_update_num_instances = 1
# DEPRECATED:
# Maximum hop count
#
# When processing a targeted message, if the local cell is not the
# target, a route is defined between neighbouring cells and the
# message is processed across the whole routing path. This option
# defines the maximum hop count allowed to reach the target.
#
# Possible values:
#
# * Positive integer value
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#max_hop_count = 10
# DEPRECATED:
# Cells scheduler.
#
# The class of the driver used by the cells scheduler. This should be
# the full Python path to the class to be used. If nothing is specified
# in this option, the CellsScheduler is used.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#scheduler = nova.cells.scheduler.CellsScheduler
# DEPRECATED:
# RPC driver queue base.
#
# When sending a message to another cell by JSON-ifying the message
# and making an RPC cast to 'process_message', a base queue is used.
# This option defines the base queue name to be used when communicating
# between cells. Various topics by message type will be appended to this.
#
# Possible values:
#
# * The base queue name to be used when communicating between cells.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#rpc_driver_queue_base = cells.intercell
# DEPRECATED:
# Scheduler filter classes.
#
# Filter classes the cells scheduler should use. An entry of
# "nova.cells.filters.all_filters" maps to all cells filters
# included with nova. As of the Mitaka release the following
# filter classes are available:
#
# Different cell filter: A scheduler hint of 'different_cell'
# with a value of a full cell name may be specified to route
# a build away from a particular cell.
#
# Image properties filter: Image metadata named
# 'hypervisor_version_requires' with a version specification
# may be specified to ensure the build goes to a cell which
# has hypervisors of the required version. If either the version
# requirement on the image or the hypervisor capability of the
# cell is not present, this filter returns without filtering out
# the cells.
#
# Target cell filter: A scheduler hint of 'target_cell' with a
# value of a full cell name may be specified to route a build to
# a particular cell. No error handling is done as there's no way
# to know whether the full path is valid.
#
# As an admin user, you can also add a filter that directs builds
# to a particular cell.
#
# (list value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#scheduler_filter_classes = nova.cells.filters.all_filters
# DEPRECATED:
# Scheduler weight classes.
#
# Weigher classes the cells scheduler should use. An entry of
# "nova.cells.weights.all_weighers" maps to all cell weighers
# included with nova. As of the Mitaka release the following
# weight classes are available:
#
# mute_child: Downgrades the likelihood of child cells that haven't
# sent capacity or capability updates in a while being chosen for
# scheduling requests. Options include
# mute_weight_multiplier (multiplier for mute children; value
# should be negative).
#
# ram_by_instance_type: Select cells with the most RAM capacity
# for the instance type being requested. Because higher weights
# win, Compute returns the number of available units for the
# instance type requested. The ram_weight_multiplier option defaults
# to 10.0, which multiplies the weight by a factor of 10. Use a negative
# number to stack VMs on one host instead of spreading out new VMs
# to more hosts in the cell.
#
# weight_offset: Allows modifying the database to weight a particular
# cell. The highest weight will be the first cell to be scheduled for
# launching an instance. When the weight_offset of a cell is set to 0,
# it is unlikely to be picked, but it could be picked if other cells
# have a lower weight, for example if they're full. When the weight_offset
# is set to a very high value (for example, '999999999999999'), it is
# likely to be picked if no other cell has a higher weight.
# (list value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#scheduler_weight_classes = nova.cells.weights.all_weighers
# DEPRECATED:
# Scheduler retries.
#
# How many retries when no cells are available. Specifies how many
# times the scheduler tries to launch a new instance when no cells
# are available.
#
# Possible values:
#
# * Positive integer value
#
# Related options:
#
# * This value is used with the ``scheduler_retry_delay`` value
# while retrying to find a suitable cell.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#scheduler_retries = 10
# DEPRECATED:
# Scheduler retry delay.
#
# Specifies the delay (in seconds) between scheduling retries when no
# cell can be found to place the new instance on. If the instance
# cannot be scheduled to a cell after ``scheduler_retries`` attempts in
# combination with ``scheduler_retry_delay``, then scheduling
# of the instance fails.
#
# Possible values:
#
# * Time in seconds.
#
# Related options:
#
# * This value is used with the ``scheduler_retries`` value
# while retrying to find a suitable cell.
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#scheduler_retry_delay = 2
# DEPRECATED:
# DB check interval.
#
# The cell state manager updates the cell status for all cells from the DB
# only after this interval has passed. Otherwise the cached status is
# used. If this value is 0 or negative, all cell statuses are
# updated from the DB whenever a state is needed.
#
# Possible values:
#
# * Interval time, in seconds.
#
# (integer value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#db_check_interval = 60
# DEPRECATED:
# Optional cells configuration.
#
# Configuration file from which to read cells configuration. If given,
# overrides reading cells from the database.
#
# Cells store all inter-cell communication data, including user names
# and passwords, in the database. Because the cells data is not updated
# very frequently, use this option to specify a JSON file to store
# cells data. With this configuration, the database is no longer
# consulted when reloading the cells data. The file must have columns
# present in the Cell model (excluding common database fields and the
# id column). You must specify the queue connection information through
# a transport_url field, instead of username, password, and so on.
#
# The transport_url has the following form:
# rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
#
# Possible values:
#
# The scheme can be either qpid or rabbit; the following sample shows
# this optional configuration:
#
# {
# "parent": {
# "name": "parent",
# "api_url": "http://api.example.com:8774",
# "transport_url": "rabbit://rabbit.example.com",
# "weight_offset": 0.0,
# "weight_scale": 1.0,
# "is_parent": true
# },
# "cell1": {
# "name": "cell1",
# "api_url": "http://api.example.com:8774",
# "transport_url": "rabbit://rabbit1.example.com",
# "weight_offset": 0.0,
# "weight_scale": 1.0,
# "is_parent": false
# },
# "cell2": {
# "name": "cell2",
# "api_url": "http://api.example.com:8774",
# "transport_url": "rabbit://rabbit2.example.com",
# "weight_offset": 0.0,
# "weight_scale": 1.0,
# "is_parent": false
# }
# }
#
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason: Cells v1 is being replaced with Cells v2.
#cells_config = <None>
[cinder]
#
# From nova.conf
#
#
# Info to match when looking for cinder in the service catalog.
#
# Possible values:
#
# * Format is separated values of the form:
# <service_type>:<service_name>:<endpoint_type>
#
# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
# release.
#
# Related options:
#
# * endpoint_template - Setting this option will override catalog_info
# (string value)
#catalog_info = volumev3:cinderv3:publicURL
#
# If this option is set, it will override the service catalog lookup with
# this template for the cinder endpoint
#
# Possible values:
#
# * URL for cinder endpoint API
# e.g. http://localhost:8776/v3/%(project_id)s
#
# Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
# release.
#
# Related options:
#
# * catalog_info - If endpoint_template is not set, catalog_info will be used.
# (string value)
#endpoint_template = <None>
#
# Region name of this node. This is used when picking the URL in the service
# catalog.
#
# Possible values:
#
# * Any string representing region name
# (string value)
#os_region_name = <None>
#
# Number of times cinderclient should retry on any failed http call.
# 0 means the connection is attempted only once. Setting it to any positive
# integer means that on failure the connection is retried that many times,
# e.g. setting it to 3 means the total number of connection attempts will be 4.
#
# Possible values:
#
# * Any integer value. 0 means connection is attempted only once
# (integer value)
# Minimum value: 0
#http_retries = 3
#
# Allow attach between instance and volume in different availability zones.
#
# If False, volumes attached to an instance must be in the same availability
# zone in Cinder as the instance availability zone in Nova.
# This also means care should be taken when booting an instance from a volume
# where the source is not "volume", because Nova will attempt to create a
# volume using the same availability zone as the one assigned to the instance.
# If that AZ is not in Cinder (or allow_availability_zone_fallback=False in
# cinder.conf), the volume create request will fail and the instance will fail
# the build request.
# By default there is no availability zone restriction on volume attach.
# (boolean value)
#cross_az_attach = true
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# If set to true, HTTPS connections are not verified. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [cinder]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [cinder]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# Tenant ID (string value)
#tenant_id = <None>
# Tenant Name (string value)
#tenant_name = <None>
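#
# A minimal illustrative keystone password-auth example for this section
# (all values are hypothetical and should match your identity service):
#
# auth_type = password
# auth_url = http://controller/identity/v3
# project_name = service
# project_domain_name = Default
# username = cinder
# user_domain_name = Default
# password = secret
# os_region_name = RegionOne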
[compute]
#
# From nova.conf
#
#
# Enables reporting of build failures to the scheduler.
#
# Any nonzero value will enable sending build failure statistics to the
# scheduler for use by the BuildFailureWeigher.
#
# Possible values:
#
# * Any positive integer enables reporting build failures.
# * Zero to disable reporting build failures.
#
# Related options:
#
# * [filter_scheduler]/build_failure_weight_multiplier
#
# (integer value)
#consecutive_build_service_disable_threshold = 10
#
# Interval for updating nova-compute-side cache of the compute node resource
# provider's aggregates and traits info.
#
# This option specifies the number of seconds between attempts to update a
# provider's aggregates and traits information in the local cache of the compute
# node.
#
# Possible values:
#
# * Any positive integer in seconds.
# (integer value)
# Minimum value: 1
#resource_provider_association_refresh = 300
#
# Determine if the source compute host should wait for a ``network-vif-plugged``
# event from the (neutron) networking service before starting the actual
# transfer
# of the guest to the destination compute host.
#
# If you set this option the same on all of your compute hosts, which you should
# do if you use the same networking backend universally, you do not have to
# worry about this.
#
# Before starting the transfer of the guest, some setup occurs on the
# destination
# compute host, including plugging virtual interfaces. Depending on the
# networking backend **on the destination host**, a ``network-vif-plugged``
# event may be triggered and then received on the source compute host and the
# source compute can wait for that event to ensure networking is set up on the
# destination host before starting the guest transfer in the hypervisor.
#
# By default, this is False for two reasons:
#
# 1. Backward compatibility: deployments should test this out and ensure it
# works
# for them before enabling it.
#
# 2. The compute service cannot reliably determine which types of virtual
# interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
# events without an accompanying port ``binding:host_id`` change.
# Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
# one known backend that will not currently work in this case, see bug
# https://launchpad.net/bugs/1755890 for more details.
#
# Possible values:
#
# * True: wait for ``network-vif-plugged`` events before starting guest transfer
# * False: do not wait for ``network-vif-plugged`` events before starting guest
# transfer (this is how things have always worked before this option
# was introduced)
#
# Related options:
#
# * [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
# True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
# reached, the live migration process will fail with an error but the guest
# transfer will not have started to the destination host
# * [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
# True, this controls the amount of time to wait before timing out and either
# failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
# live migration
# (boolean value)
#live_migration_wait_for_vif_plug = false
[conductor]
#
# Options under this group are used to define Conductor's communication,
# which manager should act as a proxy between computes and the database,
# and, finally, how many worker processes will be used.
#
# From nova.conf
#
# DEPRECATED:
# Topic exchange name on which conductor nodes listen.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# There is no need to let users choose the RPC topic for all services - there
# is little gain from this. Furthermore, it makes it really easy to break Nova
# by using this option.
#topic = conductor
#
# Number of workers for OpenStack Conductor service. The default will be the
# number of CPUs available.
# (integer value)
#workers = <None>
[console]
#
# Options under this group allow you to tune the configuration of the console
# proxy service.
#
# Note: the configuration of every compute service includes a ``console_host``
# option, which selects the console proxy service to connect to.
#
# From nova.conf
#
#
# Adds a list of allowed origins to the console websocket proxy to allow
# connections from other origin hostnames.
# The websocket proxy matches the host header with the origin header to
# prevent cross-site requests. This list specifies whether any values
# other than the host are allowed in the origin header.
#
# Possible values:
#
# * A list where each element is an allowed origin hostname, else an empty list
# (list value)
# Deprecated group/name - [DEFAULT]/console_allowed_origins
#allowed_origins =
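#
# For example (hostnames are hypothetical):
# allowed_origins = dashboard.example.com,console.example.com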
[consoleauth]
#
# From nova.conf
#
#
# The lifetime of a console auth token (in seconds).
#
# A console auth token is used in authorizing console access for a user.
# Once the auth token time to live count has elapsed, the token is
# considered expired. Expired tokens are then deleted.
# (integer value)
# Minimum value: 0
# Deprecated group/name - [DEFAULT]/console_token_ttl
#token_ttl = 600
[cors]
#
# From oslo.middleware
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
# slash. Example: https://horizon.example.com (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Service-Token
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request. (list
# value)
#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
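#
# A minimal illustrative example (the origin below is hypothetical) that
# allows a dashboard on another host to call this API with credentials:
#
# allowed_origin = https://horizon.example.com
# allow_credentials = true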
[crypto]
#
# From nova.conf
#
#
# Filename of root CA (Certificate Authority). This is a container format
# and includes root certificates.
#
# Possible values:
#
# * Any file name containing root CA, cacert.pem is default
#
# Related options:
#
# * ca_path
# (string value)
#ca_file = cacert.pem
#
# Filename of a private key.
#
# Related options:
#
# * keys_path
# (string value)
#key_file = private/cakey.pem
#
# Filename of root Certificate Revocation List (CRL). This is a list of
# certificates that have been revoked, and therefore, entities presenting
# those (revoked) certificates should no longer be trusted.
#
# Related options:
#
# * ca_path
# (string value)
#crl_file = crl.pem
#
# Directory path where keys are located.
#
# Related options:
#
# * key_file
# (string value)
#keys_path = $state_path/keys
#
# Directory path where root CA is located.
#
# Related options:
#
# * ca_file
# (string value)
#ca_path = $state_path/CA
# Option to enable/disable use of CA for each project. (boolean value)
#use_project_ca = false
#
# Subject for certificate for users, %s for
# project, user, timestamp
# (string value)
#user_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
#
# Subject for certificate for projects, %s for
# project, timestamp
# (string value)
#project_cert_subject = /C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
[database]
#
# From oslo.db
#
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
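#
# A typical example (hostname and credentials are hypothetical):
# connection = mysql+pymysql://nova:secret@controller/nova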
# The SQLAlchemy connection string to use to connect to the slave database.
# (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including the
# default, overrides any server-set SQL mode. To use whatever SQL mode is set by
# the server configuration, set this to no value. Example: mysql_sql_mode=
# (string value)
#mysql_sql_mode = TRADITIONAL
# If True, transparently enables support for handling MySQL Cluster (NDB).
# (boolean value)
#mysql_enable_ndb = false
# Connections which have been present in the connection pool longer than this
# number of seconds will be replaced with a new one the next time they are
# checked out from the pool. (integer value)
# Deprecated group/name - [DATABASE]/idle_timeout
# Deprecated group/name - [database]/idle_timeout
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#connection_recycle_time = 3600
# Minimum number of SQL connections to keep open in a pool. (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool. Setting a value of 0
# indicates no limit. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = 5
# Maximum number of database connection retries during startup. Set to -1 to
# specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = 50
# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
# value)
# Minimum value: 0
# Maximum value: 100
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection lost. (boolean
# value)
#use_db_reconnect = false
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database operation up to
# db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries of a
# database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before error is
# raised. Set to -1 to specify an infinite retry count. (integer value)
#db_max_retries = 20
#
# From oslo.db.concurrency
#
# Enable the experimental use of thread pooling for all DB API calls (boolean
# value)
# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
#use_tpool = false
[devices]
#
# From nova.conf
#
#
# A list of the vGPU types enabled in the compute node.
#
# Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. Users can use
# this option to specify a list of enabled vGPU types that may be assigned to a
# guest instance. Note, however, that Nova only supports a single type in the
# Queens release. If more than one vGPU type is specified (as a comma-separated
# list), only the first one will be used. For example:
# [devices]
# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
# (list value)
#enabled_vgpu_types =
[ephemeral_storage_encryption]
#
# From nova.conf
#
#
# Enables/disables LVM ephemeral storage encryption.
# (boolean value)
#enabled = false
#
# Cipher-mode string to be used.
#
# The cipher and mode to be used to encrypt ephemeral storage. The set of
# cipher-mode combinations available depends on kernel support. According
# to the dm-crypt documentation, the cipher is expected to be in the format:
# "<cipher>-<chainmode>-<ivmode>".
#
# Possible values:
#
# * Any crypto option listed in ``/proc/crypto``.
# (string value)
#cipher = aes-xts-plain64
#
# Encryption key length in bits.
#
# The bit length of the encryption key to be used to encrypt ephemeral storage.
# In XTS mode, only half of the bits are used for the encryption key.
# (integer value)
# Minimum value: 1
#key_size = 512
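#
# A minimal illustrative example enabling LVM ephemeral storage encryption;
# the cipher and key size shown are simply the defaults above:
#
# enabled = true
# cipher = aes-xts-plain64
# key_size = 512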
[filter_scheduler]
#
# From nova.conf
#
#
# Size of subset of best hosts selected by scheduler.
#
# New instances will be scheduled on a host chosen randomly from a subset of the
# N best hosts, where N is the value set by this option.
#
# Setting this to a value greater than 1 will reduce the chance that multiple
# scheduler processes handling similar requests will select the same host,
# creating a potential race condition. By selecting a host randomly from the N
# hosts that best fit the request, the chance of a conflict is reduced. However,
# the higher you set this value, the less optimal the chosen host may be for a
# given request.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * An integer, where the integer corresponds to the size of a host subset. Any
# integer is valid, although any value less than 1 will be treated as 1
# (integer value)
# Minimum value: 1
# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size
#host_subset_size = 1
#
# The number of instances that can be actively performing IO on a host.
#
# Instances performing IO include those in the following states: build, resize,
# snapshot, migrate, rescue, unshelve.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'io_ops_filter' filter is enabled.
#
# Possible values:
#
# * An integer, where the integer corresponds to the max number of instances
# that can be actively performing IO on any given host.
# (integer value)
#max_io_ops_per_host = 8
#
# Maximum number of instances that can be active on a host.
#
# If you need to limit the number of instances on any given host, set this
# option
# to the maximum number of instances you want to allow. The num_instances_filter
# will reject any host that has at least as many instances as this option's
# value.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'num_instances_filter' filter is enabled.
#
# Possible values:
#
# * An integer, where the integer corresponds to the max instances that can be
# scheduled on a host.
# (integer value)
# Minimum value: 1
#max_instances_per_host = 50
#
# Enable querying of individual hosts for instance information.
#
# The scheduler may need information about the instances on a host in order to
# evaluate its filters and weighers. The most common need for this information
# is
# for the (anti-)affinity filters, which need to choose a host based on the
# instances already running on a host.
#
# If the configured filters and weighers do not need this information, disabling
# this option will improve performance. It may also be disabled when the
# tracking
# overhead proves too heavy, although this will cause classes requiring host
# usage data to query the database on each request instead.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# NOTE: In a multi-cell (v2) setup where the cell MQ is separated from the
# top-level, computes cannot directly communicate with the scheduler. Thus,
# this option cannot be enabled in that scenario. See also the
# [workarounds]/disable_group_policy_check_upcall option.
# (boolean value)
# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes
#track_instance_changes = true
#
# Filters that the scheduler can use.
#
# An unordered list of the filter classes the nova scheduler may apply. Only
# the
# filters specified in the 'enabled_filters' option will be used, but
# any filter appearing in that option must also be included in this list.
#
# By default, this is set to all filters that are included with nova.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * A list of zero or more strings, where each string corresponds to the name of
# a filter that may be used for selecting a host
#
# Related options:
#
# * enabled_filters
# (multi valued)
# Deprecated group/name - [DEFAULT]/scheduler_available_filters
#available_filters = nova.scheduler.filters.all_filters
#
# Filters that the scheduler will use.
#
# An ordered list of filter class names that will be used for filtering
# hosts. These filters will be applied in the order they are listed so
# place your most restrictive filters first to make the filtering process more
# efficient.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * A list of zero or more strings, where each string corresponds to the name of
# a filter to be used for selecting a host
#
# Related options:
#
# * All of the filters in this option *must* be present in the
# 'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
# exception will be raised.
# (list value)
# Deprecated group/name - [DEFAULT]/scheduler_default_filters
#enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
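#
# As an illustrative customization (not a recommendation), a deployment that
# also wants aggregate extra-specs matching could extend the default list:
#
# enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,AggregateInstanceExtraSpecsFilter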
# DEPRECATED:
# Filters used for filtering baremetal hosts.
#
# Filters are applied in order, so place your most restrictive filters first to
# make the filtering process more efficient.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * A list of zero or more strings, where each string corresponds to the name of
# a filter to be used for selecting a baremetal host
#
# Related options:
#
# * If the 'scheduler_use_baremetal_filters' option is False, this option has
# no effect.
# (list value)
# Deprecated group/name - [DEFAULT]/baremetal_scheduler_default_filters
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# These filters were used to overcome some of the baremetal scheduling
# limitations in Nova prior to the use of the Placement API. Now scheduling will
# use the custom resource class defined for each baremetal node to make its
# selection.
#baremetal_enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
# DEPRECATED:
# Enable baremetal filters.
#
# Set this to True to tell the nova scheduler that it should use the filters
# specified in the 'baremetal_enabled_filters' option. If you are not
# scheduling baremetal nodes, leave this at the default setting of False.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Related options:
#
# * If this option is set to True, then the filters specified in the
# 'baremetal_enabled_filters' are used instead of the filters
# specified in 'enabled_filters'.
# (boolean value)
# Deprecated group/name - [DEFAULT]/scheduler_use_baremetal_filters
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# These filters were used to overcome some of the baremetal scheduling
# limitations in Nova prior to the use of the Placement API. Now scheduling will
# use the custom resource class defined for each baremetal node to make its
# selection.
#use_baremetal_filters = false
#
# Weighers that the scheduler will use.
#
# Only hosts which pass the filters are weighed. The weight for any host starts
# at 0, and the weighers order these hosts by adding to or subtracting from the
# weight assigned by the previous weigher. Weights may become negative. An
# instance will be scheduled to one of the N most-weighted hosts, where N is
# 'scheduler_host_subset_size'.
#
# By default, this is set to all weighers that are included with Nova.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * A list of zero or more strings, where each string corresponds to the name of
# a weigher that will be used for selecting a host
# (list value)
# Deprecated group/name - [DEFAULT]/scheduler_weight_classes
#weight_classes = nova.scheduler.weights.all_weighers
#
# RAM weight multiplier ratio.
#
# This option determines how hosts with more or less available RAM are weighed.
# A
# positive value will result in the scheduler preferring hosts with more
# available RAM, and a negative number will result in the scheduler preferring
# hosts with less available RAM. Another way to look at it is that positive
# values for this option will tend to spread instances across many hosts, while
# negative values will tend to fill up (stack) hosts as much as possible before
# scheduling to a less-used host. The absolute value, whether positive or
# negative, controls how strong the RAM weigher is relative to other weighers.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'ram' weigher is enabled.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
# (floating point value)
#ram_weight_multiplier = 1.0
#
# Disk weight multiplier ratio.
#
# Multiplier used for weighing free disk space. Negative numbers mean to
# stack vs spread.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'disk' weigher is enabled.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
# (floating point value)
#disk_weight_multiplier = 1.0
#
# IO operations weight multiplier ratio.
#
# This option determines how hosts with differing workloads are weighed.
# Negative
# values, such as the default, will result in the scheduler preferring hosts
# with
# lighter workloads whereas positive values will prefer hosts with heavier
# workloads. Another way to look at it is that positive values for this option
# will tend to schedule instances onto hosts that are already busy, while
# negative values will tend to distribute the workload across more hosts. The
# absolute value, whether positive or negative, controls how strong the io_ops
# weigher is relative to other weighers.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'io_ops' weigher is enabled.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
# (floating point value)
#io_ops_weight_multiplier = -1.0
#
# PCI device affinity weight multiplier.
#
# The PCI device affinity weighter computes a weighting based on the number of
# PCI devices on the host and the number of PCI devices requested by the
# instance. The ``NUMATopologyFilter`` filter must be enabled for this to have
# any significance. For more information, refer to the filter documentation:
#
# https://docs.openstack.org/nova/latest/user/filter-scheduler.html
#
# Possible values:
#
# * A positive integer or float value, where the value corresponds to the
# multiplier ratio for this weigher.
# (floating point value)
# Minimum value: 0
#pci_weight_multiplier = 1.0
#
# Multiplier used for weighing hosts for group soft-affinity.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to weight multiplier
# for hosts with group soft affinity. Only positive values are meaningful, as
# negative values would make this behave as a soft anti-affinity weigher.
# (floating point value)
#soft_affinity_weight_multiplier = 1.0
#
# Multiplier used for weighing hosts for group soft-anti-affinity.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to weight multiplier
# for hosts with group soft anti-affinity. Only positive values are
# meaningful, as negative values would make this behave as a soft affinity
# weigher.
# (floating point value)
#soft_anti_affinity_weight_multiplier = 1.0
#
# Multiplier used for weighing hosts that have had recent build failures.
#
# This option determines how much weight is placed on a compute node with
# recent build failures. Build failures may indicate a failing, misconfigured,
# or otherwise ailing compute node, and avoiding it during scheduling may be
# beneficial. The weight is inversely proportional to the number of recent
# build failures the compute node has experienced. This value should be
# set to some high value to offset weight given by other enabled weighers
# due to available resources. To disable weighing compute hosts by the
# number of recent failures, set this to zero.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
#
# Related options:
#
# * [compute]/consecutive_build_service_disable_threshold - Must be nonzero
# for a compute to report data considered by this weigher.
# (floating point value)
#build_failure_weight_multiplier = 1000000.0
#
# Enable spreading the instances between hosts with the same best weight.
#
# Enabling it is beneficial for cases when host_subset_size is 1
# (the default), but there is a large number of hosts with the same maximal
# weight. This scenario is common in Ironic deployments, where there are
# typically many baremetal nodes with identical weights returned to the
# scheduler. In such cases, enabling this option will reduce contention and
# the chance of rescheduling events.
# At the same time, it will make the instance packing (even in the unweighed
# case) less dense.
# (boolean value)
#shuffle_best_same_weighed_hosts = false
#
# The default architecture to be used when using the image properties filter.
#
# When using the ImagePropertiesFilter, you may want to define a default
# architecture to make the user experience easier and to avoid, for example,
# x86_64 images landing on aarch64 compute nodes because the user did not
# specify the 'hw_architecture' property in Glance.
#
# Possible values:
#
# * One of the following CPU architectures: alpha, armv6, armv7l, armv7b,
# aarch64, cris, i686, ia64, lm32, m68k, microblaze, microblazeel, mips,
# mipsel, mips64, mips64el, openrisc, parisc, parisc64, ppc, ppcle, ppc64,
# ppc64le, ppcemb, s390, s390x, sh4, sh4eb, sparc, sparc64, unicore32,
# x86_64, xtensa, xtensaeb
# (string value)
#image_properties_default_architecture = <None>
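#
# For example, to assume x86_64 when an image does not set the
# 'hw_architecture' property (illustrative value only):
#
# image_properties_default_architecture = x86_64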
#
# List of UUIDs for images that can only be run on certain hosts.
#
# If there is a need to restrict some images to only run on certain designated
# hosts, list those image UUIDs here.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
#
# Possible values:
#
# * A list of UUID strings, where each string corresponds to the UUID of an
# image
#
# Related options:
#
# * scheduler/isolated_hosts
# * scheduler/restrict_isolated_hosts_to_isolated_images
# (list value)
#isolated_images =
#
# List of hosts that can only run certain images.
#
# If there is a need to restrict some images to only run on certain designated
# hosts, list those host names here.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
#
# Possible values:
#
# * A list of strings, where each string corresponds to the name of a host
#
# Related options:
#
# * scheduler/isolated_images
# * scheduler/restrict_isolated_hosts_to_isolated_images
# (list value)
#isolated_hosts =
#
# Prevent non-isolated images from being built on isolated hosts.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
# then, this option doesn't affect the behavior of requests for isolated images,
# which will *always* be restricted to isolated hosts.
#
# Related options:
#
# * scheduler/isolated_images
# * scheduler/isolated_hosts
# (boolean value)
#restrict_isolated_hosts_to_isolated_images = true
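#
# For example, to pin a licensed image to two dedicated hosts (the UUID and
# host names below are purely illustrative):
#
# isolated_images = 11111111-2222-3333-4444-555555555555
# isolated_hosts = licensed-host-1,licensed-host-2
# restrict_isolated_hosts_to_isolated_images = true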
#
# Image property namespace for use in the host aggregate.
#
# Images and hosts can be configured so that certain images can only be
# scheduled to hosts in a particular aggregate. This is done with metadata
# values set on the host aggregate that are identified by beginning with the
# value of this option. If the host is part of an aggregate with such a
# metadata key, the image in the request spec must have the value of that
# metadata in its properties in order for the scheduler to consider the host
# as acceptable.
#
# This option is only used by the FilterScheduler and its subclasses; if you
# use a different scheduler, this option has no effect. Also note that this
# setting only affects scheduling if the
# 'aggregate_image_properties_isolation' filter is enabled.
#
# Possible values:
#
# * A string, where the string corresponds to an image property namespace
#
# Related options:
#
# * aggregate_image_properties_isolation_separator
# (string value)
#aggregate_image_properties_isolation_namespace = <None>
#
# Separator character(s) for image property namespace and name.
#
# When using the aggregate_image_properties_isolation filter, the relevant
# metadata keys are prefixed with the namespace defined in the
# aggregate_image_properties_isolation_namespace configuration option plus a
# separator. This option defines the separator to be used.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect. Also note that this setting
# only affects scheduling if the 'aggregate_image_properties_isolation' filter
# is enabled.
#
# Possible values:
#
# * A string, where the string corresponds to an image property namespace
# separator character
#
# Related options:
#
# * aggregate_image_properties_isolation_namespace
# (string value)
#aggregate_image_properties_isolation_separator = .
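#
# For example, with the illustrative settings below, only host aggregate
# metadata keys beginning with "os_isolation." would be compared against the
# image properties of the request:
#
# aggregate_image_properties_isolation_namespace = os_isolation
# aggregate_image_properties_isolation_separator = .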
[glance]
# Configuration options for the Image service
#
# From nova.conf
#
#
# List of glance api servers endpoints available to nova.
#
# HTTPS is used for SSL-based glance API servers.
#
# NOTE: The preferred mechanism for endpoint discovery is via keystoneauth1
# loading options. Only use api_servers if you need multiple endpoints and are
# unable to use a load balancer for some reason.
#
# Possible values:
#
# * A list of any fully qualified url of the form
# "scheme://hostname:port[/path]"
# (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image").
# (list value)
#api_servers = <None>
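#
# For example, two glance endpoints listed explicitly because no load
# balancer is available (addresses are illustrative only):
#
# api_servers = http://10.0.1.1:9292,https://glance2.example.org:9292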
#
# Enable glance operation retries.
#
# Specifies the number of retries when uploading / downloading
# an image to / from glance. 0 means no retries.
# (integer value)
# Minimum value: 0
#num_retries = 0
# DEPRECATED:
# List of url schemes that can be directly accessed.
#
# This option specifies a list of URL schemes that can be downloaded directly
# via the image's direct_url. The direct_url can be fetched from image
# metadata and used by nova to retrieve the image more efficiently; for
# example, nova-compute can perform a local copy when it has access to the
# same file system as glance.
#
# Possible values:
#
# * [file], Empty list (default)
# (list value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This was originally added for the 'nova.image.download.file' FileTransfer
# extension which was removed in the 16.0.0 Pike release. The
# 'nova.image.download.modules' extension point is not maintained
# and there is no indication of its use in production clouds.
#allowed_direct_url_schemes =
#
# Enable image signature verification.
#
# nova uses the image signature metadata from glance and verifies the signature
# of a signed image while downloading that image. If the image signature cannot
# be verified or if the image signature metadata is either incomplete or
# unavailable, then nova will not boot the image and instead will place the
# instance into an error state. This provides end users with stronger assurances
# of the integrity of the image data they are using to create servers.
#
# Related options:
#
# * The options in the `key_manager` group, as the key_manager is used
# for the signature validation.
# * Both enable_certificate_validation and default_trusted_certificate_ids
# below depend on this option being enabled.
# (boolean value)
#verify_glance_signatures = false
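#
# For example, to enable signature verification (illustrative only; this
# assumes a working key manager backend is configured in the [key_manager]
# group):
#
# verify_glance_signatures = true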
# DEPRECATED:
# Enable certificate validation for image signature verification.
#
# During image signature verification nova will first verify the validity of the
# image's signing certificate using the set of trusted certificates associated
# with the instance. If certificate validation fails, signature verification
# will not be performed and the image will be placed into an error state. This
# provides end users with stronger assurances that the image data is unmodified
# and trustworthy. If left disabled, image signature verification can still
# occur but the end user will not have any assurance that the signing
# certificate used to generate the image signature is still trustworthy.
#
# Related options:
#
# * This option only takes effect if verify_glance_signatures is enabled.
# * The value of default_trusted_certificate_ids may be used when this option
# is enabled.
# (boolean value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This option is intended to ease the transition for deployments leveraging
# image signature verification. The intended state long-term is for signature
# verification and certificate validation to always happen together.
#enable_certificate_validation = false
#
# List of certificate IDs for certificates that should be trusted.
#
# May be used as a default list of trusted certificate IDs for certificate
# validation. The value of this option will be ignored if the user provides a
# list of trusted certificate IDs with an instance API request. The value of
# this option will be persisted with the instance data if signature verification
# and certificate validation are enabled and if the user did not provide an
# alternative list. If left empty when certificate validation is enabled the
# user must provide a list of trusted certificate IDs otherwise certificate
# validation will fail.
#
# Related options:
#
# * The value of this option may be used if both verify_glance_signatures and
# enable_certificate_validation are enabled.
# (list value)
#default_trusted_certificate_ids =
# Enable or disable debug logging with glanceclient. (boolean value)
#debug = false
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# The default service_type for endpoint URL discovery. (string value)
#service_type = image
# The default service_name for endpoint URL discovery. (string value)
#service_name = <None>
# List of interfaces, in order of preference, for endpoint URL. (list value)
#valid_interfaces = internal,public
# The default region_name for endpoint URL discovery. (string value)
#region_name = <None>
# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
#endpoint_override = <None>
[guestfs]
#
# libguestfs is a set of tools for accessing and modifying virtual
# machine (VM) disk images. You can use this for viewing and editing
# files inside guests, scripting changes to VMs, monitoring disk
# used/free statistics, creating guests, P2V, V2V, performing backups,
# cloning VMs, building VMs, formatting disks and resizing disks.
#
# From nova.conf
#
#
# Enable/disables guestfs logging.
#
# This configures guestfs to emit debug messages and push them to the
# OpenStack logging system. When set to True, it traces libguestfs API calls
# and enables verbose debug messages. To use this feature, the
# "libguestfs" package must be installed.
#
# Related options:
# Since libguestfs accesses and modifies VMs managed by libvirt, the options
# below should be set to grant access to those VMs.
# * libvirt.inject_key
# * libvirt.inject_partition
# * libvirt.inject_password
# (boolean value)
#debug = false
[healthcheck]
#
# From oslo.middleware
#
# DEPRECATED: The path to respond to healthcheck requests on. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#path = /healthcheck
# Show more detailed information as part of the response (boolean value)
#detailed = false
# Additional backends that can perform health checks and report that information
# back as part of a request. (list value)
#backends =
# Check the presence of a file to determine if an application is running on a
# port. Used by DisableByFileHealthcheck plugin. (string value)
#disable_by_file_path = <None>
# Check the presence of a file based on a port to determine if an application is
# running on a port. Expects a "port:path" list of strings. Used by
# DisableByFilesPortsHealthcheck plugin. (list value)
#disable_by_file_paths =
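#
# For example (port and path are illustrative only), to report the service
# listening on port 8774 as unavailable whenever a flag file exists:
#
# disable_by_file_paths = 8774:/var/lib/nova/healthcheck_disable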
[hyperv]
#
# The hyperv feature allows you to configure the Hyper-V hypervisor
# driver to be used within an OpenStack deployment.
#
# From nova.conf
#
#
# Dynamic memory ratio
#
# Enables dynamic memory allocation (ballooning) when set to a value
# greater than 1. The value expresses the ratio between the total RAM
# assigned to an instance and its startup RAM amount. For example, a
# ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
# RAM allocated at startup.
#
# Possible values:
#
# * 1.0: Disables dynamic memory allocation (Default).
# * Float values greater than 1.0: Enables allocation of total implied
# RAM divided by this value for startup.
# (floating point value)
#dynamic_memory_ratio = 1.0
#
# Enable instance metrics collection
#
# Enables metrics collections for an instance by using Hyper-V's
# metric APIs. Collected data can be retrieved by other apps and
# services, e.g.: Ceilometer.
# (boolean value)
#enable_instance_metrics_collection = false
#
# Instances path share
#
# The name of a Windows share mapped to the "instances_path" dir
# and used by the resize feature to copy files to the target host.
# If left blank, an administrative share (hidden network share) will
# be used, looking for the same "instances_path" used locally.
#
# Possible values:
#
# * "": An administrative share will be used (Default).
# * Name of a Windows share.
#
# Related options:
#
# * "instances_path": The directory which will be used if this option
# here is left blank.
# (string value)
#instances_path_share =
#
# Limit CPU features
#
# This flag is needed to support live migration to hosts with
# different CPU features. It is checked during instance creation
# in order to limit the CPU features used by the instance.
# (boolean value)
#limit_cpu_features = false
#
# Mounted disk query retry count
#
# The number of times to retry checking for a mounted disk.
# The query runs until the device can be found or the retry
# count is reached.
#
# Possible values:
#
# * Positive integer values. Values greater than 1 are recommended
# (Default: 10).
#
# Related options:
#
# * Time interval between disk mount retries is declared with
# "mounted_disk_query_retry_interval" option.
# (integer value)
# Minimum value: 0
#mounted_disk_query_retry_count = 10
#
# Mounted disk query retry interval
#
# Interval between checks for a mounted disk, in seconds.
#
# Possible values:
#
# * Time in seconds (Default: 5).
#
# Related options:
#
# * This option is meaningful when the mounted_disk_query_retry_count
# is greater than 1.
# * The retry loop runs with mounted_disk_query_retry_count and
# mounted_disk_query_retry_interval configuration options.
# (integer value)
# Minimum value: 0
#mounted_disk_query_retry_interval = 5
#
# Power state check timeframe
#
# The timeframe to be checked for instance power state changes.
# This option is used to fetch the state of the instance from Hyper-V
# through the WMI interface, within the specified timeframe.
#
# Possible values:
#
# * Timeframe in seconds (Default: 60).
# (integer value)
# Minimum value: 0
#power_state_check_timeframe = 60
#
# Power state event polling interval
#
# Instance power state change event polling frequency. Sets the
# listener interval for power state events to the given value.
# This option enhances the internal lifecycle notifications of
# instances that reboot themselves. It is unlikely that an operator
# has to change this value.
#
# Possible values:
#
# * Time in seconds (Default: 2).
# (integer value)
# Minimum value: 0
#power_state_event_polling_interval = 2
#
# qemu-img command
#
# qemu-img is required for some of the image related operations
# like converting between different image types. You can get it
# from here: (http://qemu.weilnetz.de/) or you can install the
# Cloudbase OpenStack Hyper-V Compute Driver
# (https://cloudbase.it/openstack-hyperv-driver/) which automatically
# sets the proper path for this config option. You can either give the
# full path of qemu-img.exe or set its path in the PATH environment
# variable and leave this option to the default value.
#
# Possible values:
#
# * Name of the qemu-img executable, in case it is in the same
# directory as the nova-compute service or its path is in the
# PATH environment variable (Default).
# * Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
#
# Related options:
#
# * If the config_drive_cdrom option is False, qemu-img will be used to
# convert the ISO to a VHD, otherwise the configuration drive will
# remain an ISO. To use configuration drive with Hyper-V, you must
# set the mkisofs_cmd value to the full path to an mkisofs.exe
# installation.
# (string value)
#qemu_img_cmd = qemu-img.exe
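#
# For example, pointing at a hypothetical installation path (adjust to where
# qemu-img.exe actually lives on your system):
#
# qemu_img_cmd = C:\qemu-img\qemu-img.exe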
#
# External virtual switch name
#
# The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
# network switch that is available with the installation of the
# Hyper-V server role. The switch includes programmatically managed
# and extensible capabilities to connect virtual machines to both
# virtual networks and the physical network. In addition, Hyper-V
# Virtual Switch provides policy enforcement for security, isolation,
# and service levels. The vSwitch represented by this config option
# must be an external one (not internal or private).
#
# Possible values:
#
# * If not provided, the first of a list of available vswitches
# is used. This list is queried using WQL.
# * Virtual switch name.
# (string value)
#vswitch_name = <None>
#
# Wait soft reboot seconds
#
# Number of seconds to wait for the instance to shut down after a soft
# reboot request is made. We fall back to hard reboot if the instance
# does not shut down within this window.
#
# Possible values:
#
# * Time in seconds (Default: 60).
# (integer value)
# Minimum value: 0
#wait_soft_reboot_seconds = 60
#
# Configuration drive cdrom
#
# OpenStack can be configured to write instance metadata to
# a configuration drive, which is then attached to the
# instance before it boots. The configuration drive can be
# attached as a disk drive (default) or as a CD drive.
#
# Possible values:
#
# * True: Attach the configuration drive image as a CD drive.
# * False: Attach the configuration drive image as a disk drive (Default).
#
# Related options:
#
# * This option is meaningful with the force_config_drive option set to 'True'
# or when the REST API call to create an instance includes the
# '--config-drive=True' flag.
# * config_drive_format option must be set to 'iso9660' in order to use
# CD drive as the configuration drive image.
# * To use configuration drive with Hyper-V, you must set the
# mkisofs_cmd value to the full path to an mkisofs.exe installation.
# Additionally, you must set the qemu_img_cmd value to the full path
# to a qemu-img command installation.
# * You can configure the Compute service to always create a configuration
# drive by setting the force_config_drive option to 'True'.
# (boolean value)
#config_drive_cdrom = false
#
# Configuration drive inject password
#
# Enables setting the admin password in the configuration drive image.
#
# Related options:
#
# * This option is meaningful when used with other options that enable
# configuration drive usage with Hyper-V, such as force_config_drive.
# * Currently, the only accepted config_drive_format is 'iso9660'.
# (boolean value)
#config_drive_inject_password = false
#
# Volume attach retry count
#
# The number of times to retry attaching a volume. Volume attachment
# is retried until success or the given retry count is reached.
#
# Possible values:
#
# * Positive integer values (Default: 10).
#
# Related options:
#
# * Time interval between attachment attempts is declared with
# volume_attach_retry_interval option.
# (integer value)
# Minimum value: 0
#volume_attach_retry_count = 10
#
# Volume attach retry interval
#
# Interval between volume attachment attempts, in seconds.
#
# Possible values:
#
# * Time in seconds (Default: 5).
#
# Related options:
#
# * This option is meaningful when volume_attach_retry_count
# is greater than 1.
# * The retry loop runs with volume_attach_retry_count and
# volume_attach_retry_interval configuration options.
# (integer value)
# Minimum value: 0
#volume_attach_retry_interval = 5
#
# Enable RemoteFX feature
#
# This requires at least one DirectX 11 capable graphics adapter for
# Windows / Hyper-V Server 2012 R2 or newer, and the RDS-Virtualization
# feature has to be enabled.
#
# Instances with RemoteFX can be requested with the following flavor
# extra specs:
#
# **os:resolution**. Guest VM screen resolution size. Acceptable values::
#
# 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
#
# ``3840x2160`` is only available on Windows / Hyper-V Server 2016.
#
# **os:monitors**. Guest VM number of monitors. Acceptable values::
#
# [1, 4] - Windows / Hyper-V Server 2012 R2
# [1, 8] - Windows / Hyper-V Server 2016
#
# **os:vram**. Guest VM VRAM amount. Only available on
# Windows / Hyper-V Server 2016. Acceptable values::
#
# 64, 128, 256, 512, 1024
# (boolean value)
#enable_remotefx = false
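#
# For example, a RemoteFX-capable flavor could carry extra specs such as the
# following (illustrative values chosen from the ranges above):
#
# os:resolution=1920x1200, os:monitors=2, os:vram=1024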
#
# Use multipath connections when attaching iSCSI or FC disks.
#
# This requires the Multipath IO Windows feature to be enabled. MPIO must be
# configured to claim such devices.
# (boolean value)
#use_multipath_io = false
#
# List of iSCSI initiators that will be used for establishing iSCSI sessions.
#
# If none are specified, the Microsoft iSCSI initiator service will choose the
# initiator.
# (list value)
#iscsi_initiator_list =
[ironic]
#
# Configuration options for Ironic driver (Bare Metal).
# If using the Ironic driver, the following options must be set (see the
# illustrative example below):
# * auth_type
# * auth_url
# * project_name
# * username
# * password
# * project_domain_id or project_domain_name
# * user_domain_id or user_domain_name
#
# From nova.conf
#
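# A minimal authentication setup could look like the following; all values
# are illustrative and must match your deployment:
#
# auth_type = password
# auth_url = http://keystone.example.org:5000/v3
# project_name = service
# project_domain_name = Default
# username = ironic
# user_domain_name = Default
# password = secret
#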
# DEPRECATED: URL override for the Ironic API endpoint. (uri value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
# Adapter configuration options. In the current release, api_endpoint will
# override this behavior, but will be ignored and/or removed in a future
# release. To achieve the same result, use the endpoint_override option instead.
#api_endpoint = http://ironic.example.org:6385/
#
# The number of times to retry when a request conflicts.
# If set to 0, only try once, no retries.
#
# Related options:
#
# * api_retry_interval
# (integer value)
# Minimum value: 0
#api_max_retries = 60
#
# The number of seconds to wait before retrying the request.
#
# Related options:
#
# * api_max_retries
# (integer value)
# Minimum value: 0
#api_retry_interval = 2
# Timeout (seconds) to wait for node serial console state changed. Set to 0 to
# disable timeout. (integer value)
# Minimum value: 0
#serial_console_state_timeout = 10
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [ironic]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [ironic]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# The default service_type for endpoint URL discovery. (string value)
#service_type = baremetal
# The default service_name for endpoint URL discovery. (string value)
#service_name = <None>
# List of interfaces, in order of preference, for endpoint URL. (list value)
#valid_interfaces = internal,public
# The default region_name for endpoint URL discovery. (string value)
#region_name = <None>
# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
# Deprecated group/name - [ironic]/api_endpoint
#endpoint_override = <None>
[key_manager]
#
# From nova.conf
#
#
# Fixed key returned by key manager, specified in hex.
#
# Possible values:
#
# * Empty string or a key in hex value
# (string value)
#fixed_key = <None>
# Specify the key manager implementation. Options are "barbican" and "vault".
# Default is "barbican". Will support the values earlier set using
# [key_manager]/api_class for some time. (string value)
# Deprecated group/name - [key_manager]/api_class
#backend = barbican
# The type of authentication credential to create. Possible values are 'token',
# 'password', 'keystone_token', and 'keystone_password'. Required if no context
# is passed to the credential factory. (string value)
#auth_type = <None>
# Token for authentication. Required for 'token' and 'keystone_token' auth_type
# if no context is passed to the credential factory. (string value)
#token = <None>
# Username for authentication. Required for 'password' auth_type. Optional for
# the 'keystone_password' auth_type. (string value)
#username = <None>
# Password for authentication. Required for 'password' and 'keystone_password'
# auth_type. (string value)
#password = <None>
# Use this endpoint to connect to Keystone. (string value)
#auth_url = <None>
# User ID for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_id = <None>
# User's domain ID for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_domain_id = <None>
# User's domain name for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_domain_name = <None>
# Trust ID for trust scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#trust_id = <None>
# Domain ID for domain scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#domain_id = <None>
# Domain name for domain scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#domain_name = <None>
# Project ID for project scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_id = <None>
# Project name for project scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_name = <None>
# Project's domain ID for project. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_domain_id = <None>
# Project's domain name for project. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_domain_name = <None>
# Allow fetching a new token if the current one is going to expire. Optional for
# 'keystone_token' and 'keystone_password' auth_type. (boolean value)
#reauthenticate = true
[keystone]
# Configuration options for the identity service
#
# From nova.conf
#
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# The default service_type for endpoint URL discovery. (string value)
#service_type = identity
# The default service_name for endpoint URL discovery. (string value)
#service_name = <None>
# List of interfaces, in order of preference, for endpoint URL. (list value)
#valid_interfaces = internal,public
# The default region_name for endpoint URL discovery. (string value)
#region_name = <None>
# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
#endpoint_override = <None>
[keystone_authtoken]
#
# From keystonemiddleware.auth_token
#
# Complete "public" Identity API endpoint. This endpoint should not be an
# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
# clients are redirected to this endpoint to authenticate. Although this
# endpoint should ideally be unversioned, client support in the wild varies. If
# you're using a versioned v2 endpoint here, then this should *not* be the same
# endpoint the service user utilizes for validating tokens, because normal end
# users may not be able to reach that endpoint. (string value)
# Deprecated group/name - [keystone_authtoken]/auth_uri
#www_authenticate_uri = <None>
# DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not
# be an "admin" endpoint, as it should be accessible by all end users.
# Unauthenticated clients are redirected to this endpoint to authenticate.
# Although this endpoint should ideally be unversioned, client support in the
# wild varies. If you're using a versioned v2 endpoint here, then this should
# *not* be the same endpoint the service user utilizes for validating tokens,
# because normal end users may not be able to reach that endpoint. This option
# is deprecated in favor of www_authenticate_uri and will be removed in the S
# release. (string value)
# This option is deprecated for removal since Queens.
# Its value may be silently ignored in the future.
# Reason: The auth_uri option is deprecated in favor of www_authenticate_uri and
# will be removed in the S release.
#auth_uri = <None>
# API version of the admin Identity API endpoint. (string value)
#auth_version = <None>
# Do not handle authorization requests within the middleware, but delegate the
# authorization decision to downstream WSGI components. (boolean value)
#delay_auth_decision = false
# Request timeout value for communicating with Identity API server. (integer
# value)
#http_connect_timeout = <None>
# How many times to retry reconnecting when communicating with the Identity
# API Server. (integer value)
#http_request_max_retries = 3
# Request environment key where the Swift cache object is stored. When
# auth_token middleware is deployed with a Swift cache, use this option to have
# the middleware share a caching backend with swift. Otherwise, use the
# ``memcached_servers`` option instead. (string value)
#cache = <None>
# Required if identity server requires client certificate (string value)
#certfile = <None>
# Required if identity server requires client certificate (string value)
#keyfile = <None>
# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
# Defaults to system CAs. (string value)
#cafile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# The region in which the identity server can be found. (string value)
#region_name = <None>
# DEPRECATED: Directory used to cache files related to PKI tokens. This option
# has been deprecated in the Ocata release and will be removed in the P release.
# (string value)
# This option is deprecated for removal since Ocata.
# Its value may be silently ignored in the future.
# Reason: PKI token format is no longer supported.
#signing_dir = <None>
# Optionally specify a list of memcached server(s) to use for caching. If left
# undefined, tokens will instead be cached in-process. (list value)
# Deprecated group/name - [keystone_authtoken]/memcache_servers
#memcached_servers = <None>
# In order to prevent excessive effort spent validating tokens, the middleware
# caches previously-seen tokens for a configurable duration (in seconds). Set to
# -1 to disable caching completely. (integer value)
#token_cache_time = 300
# DEPRECATED: Determines the frequency at which the list of revoked tokens is
# retrieved from the Identity service (in seconds). A high number of revocation
# events combined with a low cache duration may significantly reduce
# performance. Only valid for PKI tokens. This option has been deprecated in the
# Ocata release and will be removed in the P release. (integer value)
# This option is deprecated for removal since Ocata.
# Its value may be silently ignored in the future.
# Reason: PKI token format is no longer supported.
#revocation_cache_time = 10
# (Optional) If defined, indicate whether token data should be authenticated or
# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
# cache. If the value is not one of these options or empty, auth_token will
# raise an exception on initialization. (string value)
# Possible values:
# None - <No description provided>
# MAC - <No description provided>
# ENCRYPT - <No description provided>
#memcache_security_strategy = None
# (Optional, mandatory if memcache_security_strategy is defined) This string is
# used for key derivation. (string value)
#memcache_secret_key = <None>
# (Optional) Number of seconds memcached server is considered dead before it is
# tried again. (integer value)
#memcache_pool_dead_retry = 300
# (Optional) Maximum total number of open connections to every memcached server.
# (integer value)
#memcache_pool_maxsize = 10
# (Optional) Socket timeout in seconds for communicating with a memcached
# server. (integer value)
#memcache_pool_socket_timeout = 3
# (Optional) Number of seconds a connection to memcached is held unused in the
# pool before it is closed. (integer value)
#memcache_pool_unused_timeout = 60
# (Optional) Number of seconds that an operation will wait to get a memcached
# client connection from the pool. (integer value)
#memcache_pool_conn_get_timeout = 10
# (Optional) Use the advanced (eventlet safe) memcached client pool. The
# advanced pool will only work under python 2.x. (boolean value)
#memcache_use_advanced_pool = false
# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
# middleware will not ask for service catalog on token validation and will not
# set the X-Service-Catalog header. (boolean value)
#include_service_catalog = true
# Used to control the use and type of token binding. Can be set to: "disabled"
# to not check token binding. "permissive" (default) to validate binding
# information if the bind type is of a form known to the server and ignore it if
# not. "strict" like "permissive" but if the bind type is unknown the token will
# be rejected. "required" any form of token binding is needed to be allowed.
# Finally the name of a binding method that must be present in tokens. (string
# value)
#enforce_token_bind = permissive
# DEPRECATED: If true, the revocation list will be checked for cached tokens.
# This requires that PKI tokens are configured on the identity server. (boolean
# value)
# This option is deprecated for removal since Ocata.
# Its value may be silently ignored in the future.
# Reason: PKI token format is no longer supported.
#check_revocations_for_cached = false
# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a
# single algorithm or multiple. The algorithms are those supported by Python
# standard hashlib.new(). The hashes will be tried in the order given, so put
# the preferred one first for performance. The result of the first hash will be
# stored in the cache. This will typically be set to multiple values only while
# migrating from a less secure algorithm to a more secure one. Once all the old
# tokens are expired this option should be set to a single value for better
# performance. (list value)
# This option is deprecated for removal since Ocata.
# Its value may be silently ignored in the future.
# Reason: PKI token format is no longer supported.
#hash_algorithms = md5
# A choice of roles that must be present in a service token. Service tokens are
# allowed to request that an expired token can be used and so this check should
# tightly control that only actual services should be sending this token. Roles
# here are applied as an ANY check so any role in this list must be present. For
# backwards compatibility reasons this currently only affects the allow_expired
# check. (list value)
#service_token_roles = service
# For backwards compatibility reasons we must let valid service tokens pass that
# don't pass the service_token_roles check as valid. Setting this true will
# become the default in a future release and should be enabled if possible.
# (boolean value)
#service_token_roles_required = false
# Authentication type to load (string value)
# Deprecated group/name - [keystone_authtoken]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
[libvirt]
#
# Libvirt options allow the cloud administrator to configure the libvirt
# hypervisor driver to be used within an OpenStack deployment.
#
# Almost all of the libvirt config options are influenced by the ``virt_type``
# option, which describes the virtualization type (or so-called domain type)
# libvirt should use for specific features such as live migration and
# snapshots.
#
# From nova.conf
#
#
# The ID of the image to boot from to rescue data from a corrupted instance.
#
# If the rescue REST API operation doesn't provide an ID of an image to
# use, the image which is referenced by this ID is used. If this
# option is not set, the image from the instance is used.
#
# Possible values:
#
# * An ID of an image or nothing. If it points to an *Amazon Machine
# Image* (AMI), consider setting the config options ``rescue_kernel_id``
# and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance
# is used.
#
# Related options:
#
# * ``rescue_kernel_id``: If the chosen rescue image allows the separate
# definition of its kernel disk, the value of this option is used,
# if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
# format is used for the rescue image.
# * ``rescue_ramdisk_id``: If the chosen rescue image allows the separate
# definition of its RAM disk, the value of this option is used, if
# specified. This is the case when *Amazon*'s AMI/AKI/ARI image
# format is used for the rescue image.
# (string value)
#rescue_image_id = <None>
#
# The ID of the kernel (AKI) image to use with the rescue image.
#
# If the chosen rescue image allows the separate definition of its kernel
# disk, the value of this option is used, if specified. This is the case
# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
#
# Possible values:
#
# * An ID of a kernel image or nothing. If nothing is specified, the kernel
# disk from the instance is used if it was launched with one.
#
# Related options:
#
# * ``rescue_image_id``: If that option points to an image in *Amazon*'s
# AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too.
# (string value)
#rescue_kernel_id = <None>
#
# The ID of the RAM disk (ARI) image to use with the rescue image.
#
# If the chosen rescue image allows the separate definition of its RAM
# disk, the value of this option is used, if specified. This is the case
# when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
#
# Possible values:
#
# * An ID of a RAM disk image or nothing. If nothing is specified, the RAM
# disk from the instance is used if it was launched with one.
#
# Related options:
#
# * ``rescue_image_id``: If that option points to an image in *Amazon*'s
# AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too.
# (string value)
#rescue_ramdisk_id = <None>
#
# Describes the virtualization type (or so called domain type) libvirt should
# use.
#
# The choice of this type must match the underlying virtualization strategy
# you have chosen for this host.
#
# Possible values:
#
# * See the predefined set of case-sensitive values.
#
# Related options:
#
# * ``connection_uri``: depends on this
# * ``disk_prefix``: depends on this
# * ``cpu_mode``: depends on this
# * ``cpu_model``: depends on this
# (string value)
# Possible values:
# kvm - <No description provided>
# lxc - <No description provided>
# qemu - <No description provided>
# uml - <No description provided>
# xen - <No description provided>
# parallels - <No description provided>
#virt_type = kvm
#
# Overrides the default libvirt URI of the chosen virtualization type.
#
# If set, Nova will use this URI to connect to libvirt.
#
# Possible values:
#
# * A URI like ``qemu:///system`` or ``xen+ssh://oirase/``, for example.
# This is only necessary if the URI differs from the commonly known URIs
# for the chosen virtualization type.
#
# Related options:
#
# * ``virt_type``: Influences what is used as default value here.
# (string value)
#connection_uri =
#
# Allow the injection of an admin password for an instance, only during the
# ``create`` and ``rebuild`` process.
#
# There is no agent needed within the image to do this. If *libguestfs* is
# available on the host, it will be used. Otherwise *nbd* is used. The file
# system of the image will be mounted and the admin password, which is
# provided in the REST API call, will be injected as the password for the root
# user. If no root user is available, the instance won't be launched and an
# error is thrown.
# Be aware that the injection is *not* possible when the instance gets launched
# from a volume.
#
# Possible values:
#
# * True: Allows the injection.
# * False (default): Disallows the injection. Any admin password provided
# via the REST API will be silently ignored.
#
# Related options:
#
# * ``inject_partition``: That option decides how the file system is
# discovered and used. It can also disable injection entirely.
# (boolean value)
#inject_password = false
#
# Allow the injection of an SSH key at boot time.
#
# There is no agent needed within the image to do this. If *libguestfs* is
# available on the host, it will be used. Otherwise *nbd* is used. The file
# system of the image will be mounted and the SSH key, which is provided
# in the REST API call, will be injected as the SSH key for the root user and
# appended to the ``authorized_keys`` of that user. The SELinux context will
# be set if necessary. Be aware that the injection is *not* possible when the
# instance gets launched from a volume.
#
# This config option will enable directly modifying the instance disk and does
# not affect what cloud-init may do using data from config_drive option or the
# metadata service.
#
# Related options:
#
# * ``inject_partition``: That option decides how the file system is
# discovered and used. It can also disable injection entirely.
# (boolean value)
#inject_key = false
#
# Determines how the file system is chosen for injecting data into it.
#
# *libguestfs* will be used as the first solution to inject data. If it is not
# available on the host, the image will be locally mounted on the host as a
# fallback solution. If libguestfs is not able to determine the root partition
# (because there are more or fewer than one root partition) or cannot mount
# the file system, it will result in an error and the instance won't boot.
#
# Possible values:
#
# * -2 => disable the injection of data.
# * -1 => find the root partition with the file system to mount with libguestfs
# * 0 => The image is not partitioned
# * >0 => The number of the partition to use for the injection
#
# Related options:
#
# * ``inject_key``: SSH key injection is only possible if ``inject_partition``
# is set to a value greater than or equal to -1.
# * ``inject_password``: Admin password injection is only possible if
# ``inject_partition`` is set to a value greater than or equal to -1.
# * ``guestfs``: You can enable the debug log level of libguestfs with this
# config option. A more verbose output will help in debugging issues.
# * ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a
# single partition image.
# (integer value)
# Minimum value: -2
#inject_partition = -2
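#
# For example, to allow SSH key injection while letting libguestfs locate the
# root partition automatically (illustrative only):
#
# inject_key = true
# inject_partition = -1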
# DEPRECATED:
# Enable a mouse cursor within graphical VNC or SPICE sessions.
#
# This will only be taken into account if the VM is fully virtualized and VNC
# and/or SPICE is enabled. If the node doesn't support a graphical framebuffer,
# then it is valid to set this to False.
#
# Related options:
# * ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have an effect.
# * ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is enabled and the
# spice agent is disabled, the config value of ``use_usb_tablet`` will have
# an effect.
# (boolean value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason: This option is being replaced by the 'pointer_model' option.
#use_usb_tablet = true
#
# The IP address or hostname to be used as the target for live migration
# traffic.
#
# If this option is set to None, the hostname of the migration target compute
# node will be used.
#
# This option is useful in environments where the live-migration traffic can
# impact the network plane significantly. A separate network for
# live-migration traffic can then be configured via this option, avoiding the
# impact on the management network.
#
# Possible values:
#
# * A valid IP address or hostname, else None.
#
# Related options:
#
# * ``live_migration_tunnelled``: The live_migration_inbound_addr value is
# ignored if tunneling is enabled.
# (string value)
#live_migration_inbound_addr = <None>
# DEPRECATED:
# Live migration target URI to use.
#
# Override the default libvirt live migration target URI (which is dependent
# on virt_type). Any included "%s" is replaced with the migration target
# hostname.
#
# If this option is set to None (which is the default), Nova will automatically
# generate the `live_migration_uri` value based on the 4 supported `virt_type`
# values in the following list:
#
# * 'kvm': 'qemu+tcp://%s/system'
# * 'qemu': 'qemu+tcp://%s/system'
# * 'xen': 'xenmigr://%s/system'
# * 'parallels': 'parallels+tcp://%s/system'
#
# Related options:
#
# * ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value
# is not None and ``live_migration_tunnelled`` is False, the ip/hostname
# address of target compute node is used instead of ``live_migration_uri`` as
# the uri for live migration.
# * ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme
# used for live migration is taken from ``live_migration_scheme`` instead.
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# live_migration_uri is deprecated for removal in favor of two other options
# that allow changing the live migration scheme and target URI:
# ``live_migration_scheme`` and ``live_migration_inbound_addr`` respectively.
#live_migration_uri = <None>
#
# URI scheme used for live migration.
#
# Override the default libvirt live migration scheme (which is dependent on
# virt_type). If this option is set to None, nova will automatically choose a
# sensible default based on the hypervisor. It is not recommended that you
# change this unless you are very sure that the hypervisor supports a
# particular scheme.
#
# Related options:
#
# * ``virt_type``: This option is meaningful only when ``virt_type`` is set to
# `kvm` or `qemu`.
# * ``live_migration_uri``: If ``live_migration_uri`` value is not None, the
# scheme used for live migration is taken from ``live_migration_uri`` instead.
# (string value)
#live_migration_scheme = <None>
#
# Enable tunnelled migration.
#
# This option enables the tunnelled migration feature, where migration data is
# transported over the libvirtd connection. If enabled, we use the
# VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
# the network to allow direct hypervisor to hypervisor communication.
# If False, use the native transport. If not set, Nova will choose a
# sensible default based on, for example, the availability of native
# encryption support in the hypervisor. Enabling this option will have a
# significant negative impact on performance.
#
# Note that this option is NOT compatible with use of block migration.
#
# Related options:
#
# * ``live_migration_inbound_addr``: The live_migration_inbound_addr value is
# ignored if tunneling is enabled.
# (boolean value)
#live_migration_tunnelled = false
#
# Maximum bandwidth(in MiB/s) to be used during migration.
#
# If set to 0, the hypervisor will choose a suitable default. Some hypervisors
# do not support this feature and will return an error if bandwidth is not 0.
# Please refer to the libvirt documentation for further details.
# (integer value)
#live_migration_bandwidth = 0
#
# Maximum permitted downtime, in milliseconds, for live migration
# switchover.
#
# Will be rounded up to a minimum of 100ms. You can increase this value
# if you want to allow live-migrations to complete faster, or avoid
# live-migration timeout errors by allowing the guest to be paused for
# longer during the live-migration switch over.
#
# Related options:
#
# * live_migration_completion_timeout
# (integer value)
# Minimum value: 100
#live_migration_downtime = 500
#
# Number of incremental steps to reach max downtime value.
#
# Will be rounded up to a minimum of 3 steps.
# (integer value)
# Minimum value: 3
#live_migration_downtime_steps = 10
#
# Time to wait, in seconds, between each step increase of the migration
# downtime.
#
# Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to be
# transferred, with a lower bound of 2 GiB per device.
# (integer value)
# Minimum value: 3
#live_migration_downtime_delay = 75
#
# Time to wait, in seconds, for migration to successfully complete transferring
# data before aborting the operation.
#
# Value is per GiB of guest RAM + disk to be transferred, with a lower bound
# of 2 GiB. Should usually be larger than downtime delay * downtime
# steps. Set to 0 to disable timeouts.
#
# Related options:
#
# * live_migration_downtime
# * live_migration_downtime_steps
# * live_migration_downtime_delay
# (integer value)
# Note: This option can be changed without restarting.
#live_migration_completion_timeout = 800
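#
# As an illustration of the per-GiB scaling described above: with the default
# of 800 seconds, a guest that needs to transfer 8 GiB of RAM + disk would be
# given roughly 8 * 800 = 6400 seconds to complete before the migration is
# aborted.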
# DEPRECATED:
# Time to wait, in seconds, for migration to make forward progress in
# transferring data before aborting the operation.
#
# Set to 0 to disable timeouts.
#
# This is deprecated, and now disabled by default because we have found serious
# bugs in this feature that caused false live-migration timeout failures. This
# feature will be removed or replaced in a future release.
# (integer value)
# Note: This option can be changed without restarting.
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Serious bugs found in this feature.
#live_migration_progress_timeout = 0
#
# This option allows nova to switch an on-going live migration to post-copy
# mode, i.e., switch the active VM to the one on the destination node before the
# migration is complete, therefore ensuring an upper bound on the memory that
# needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0.
#
# When permitted, post-copy mode will be automatically activated if a
# live-migration memory copy iteration does not make a percentage increase of
# at least 10% over the last iteration.
#
# The live-migration force complete API also uses post-copy when permitted. If
# post-copy mode is not available, force complete falls back to pausing the VM
# to ensure the live-migration operation will complete.
#
# When using post-copy mode, if the source and destination hosts lose network
# connectivity, the VM being live-migrated will need to be rebooted. For more
# details, please see the Administration guide.
#
# Related options:
#
# * live_migration_permit_auto_converge
# (boolean value)
#live_migration_permit_post_copy = false
#
# This option allows nova to start live migration with auto converge on.
#
# Auto converge throttles down the guest CPU if the progress of an on-going
# live migration is slow. Auto converge will only be used if this flag is set
# to True and
# post copy is not permitted or post copy is unavailable due to the version
# of libvirt and QEMU in use.
#
# Related options:
#
# * live_migration_permit_post_copy
# (boolean value)
#live_migration_permit_auto_converge = false
#
# Determine the snapshot image format when sending to the image service.
#
# If set, this decides what format is used when sending the snapshot to the
# image service.
# If not set, defaults to same type as source image.
#
# Possible values:
#
# * ``raw``: RAW disk format
# * ``qcow2``: KVM default disk format
# * ``vmdk``: VMWare default disk format
# * ``vdi``: VirtualBox default disk format
# * If not set, defaults to same type as source image.
# (string value)
# Possible values:
# raw - <No description provided>
# qcow2 - <No description provided>
# vmdk - <No description provided>
# vdi - <No description provided>
#snapshot_image_format = <None>
#
# Override the default disk prefix for the devices attached to an instance.
#
# If set, this is used to identify a free disk device name for a bus.
#
# Possible values:
#
# * Any prefix which will result in a valid disk device name like 'sda' or
# 'hda', for example. This is only necessary if the device names differ from
# the commonly known device name prefixes for a virtualization type such as:
# sd, xvd, uvd, vd.
#
# Related options:
#
# * ``virt_type``: Influences which device type is used, which determines
# the default disk prefix.
# (string value)
#disk_prefix = <None>
# Number of seconds to wait for the instance to shut down after a soft reboot
# request is made. We fall back to hard reboot if the instance does not shut
# down within this window. (integer value)
#wait_soft_reboot_seconds = 120
#
# Is used to set the CPU mode an instance should have.
#
# If virt_type="kvm|qemu", it will default to "host-model", otherwise it will
# default to "none".
#
# Possible values:
#
# * ``host-model``: Clones the host CPU feature flags
# * ``host-passthrough``: Use the host CPU model exactly
# * ``custom``: Use a named CPU model
# * ``none``: Don't set a specific CPU model. For instances with
# ``virt_type`` as KVM/QEMU, the default CPU model from QEMU will be used,
# which provides a basic set of CPU features that are compatible with most
# hosts.
#
# Related options:
#
# * ``cpu_model``: This should be set ONLY when ``cpu_mode`` is set to
# ``custom``. Otherwise, it would result in an error and the instance
# launch will fail.
#
# (string value)
# Possible values:
# host-model - <No description provided>
# host-passthrough - <No description provided>
# custom - <No description provided>
# none - <No description provided>
#cpu_mode = <None>
#
# Set the name of the libvirt CPU model the instance should use.
#
# Possible values:
#
# * The named CPU models listed in ``/usr/share/libvirt/cpu_map.xml``
#
# Related options:
#
# * ``cpu_mode``: This should be set to ``custom`` ONLY when you want to
# configure (via ``cpu_model``) a specific named CPU model. Otherwise, it
# would result in an error and the instance launch will fail.
#
# * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this.
# (string value)
#cpu_model = <None>
#
# This allows specifying granular CPU feature flags when specifying CPU
# models. For example, to explicitly specify the ``pcid``
# (Process-Context ID, an Intel processor feature) flag to the "IvyBridge"
# virtual CPU model::
#
# [libvirt]
# cpu_mode = custom
# cpu_model = IvyBridge
# cpu_model_extra_flags = pcid
#
# Currently, the choice is restricted to a few options: ``pcid``,
# ``ssbd``, ``virt-ssbd``, ``amd-ssbd``, and ``amd-no-ssb`` (the options
# are case-insensitive, so ``PCID`` is also valid, for example). These
# flags are now required to address the guest performance degradation as
# a result of applying the "Meltdown" CVE fixes (``pcid``) and exposure
# mitigation (``ssbd`` and related options) on affected CPU models.
#
# Note that when using this config attribute to set the 'PCID' and
# related CPU flags, not all virtual (i.e. libvirt / QEMU) CPU models
# need it:
#
# * The only virtual CPU models that include the 'PCID' capability are
# Intel "Haswell", "Broadwell", and "Skylake" variants.
#
# * The libvirt / QEMU CPU models "Nehalem", "Westmere", "SandyBridge",
# and "IvyBridge" will _not_ expose the 'PCID' capability by default,
# even if the host CPUs by the same name include it. I.e. 'PCID' needs
# to be explicitly specified when using the said virtual CPU models.
#
# For more information about ``ssbd`` and related options,
# please refer to the following security updates:
#
# https://www.us-cert.gov/ncas/alerts/TA18-141A
#
# https://www.redhat.com/archives/libvir-list/2018-May/msg01562.html
#
# https://www.redhat.com/archives/libvir-list/2018-June/msg01111.html
#
# For now, the ``cpu_model_extra_flags`` config attribute is valid only in
# combination with ``cpu_mode`` + ``cpu_model`` options.
#
# Besides ``custom``, the libvirt driver has two other CPU modes: The
# default, ``host-model``, tells it to do the right thing with respect to
# handling 'PCID' CPU flag for the guest -- *assuming* you are running
# updated processor microcode, host and guest kernel, libvirt, and QEMU.
# The other mode, ``host-passthrough``, checks if 'PCID' is available in
# the hardware, and if so directly passes it through to the Nova guests.
# Thus, in context of 'PCID', with either of these CPU modes
# (``host-model`` or ``host-passthrough``), there is no need to use the
# ``cpu_model_extra_flags``.
#
# Related options:
#
# * cpu_mode
# * cpu_model
# (list value)
#cpu_model_extra_flags =
# Location where libvirt driver will store snapshots before uploading them to
# image service (string value)
#snapshots_directory = $instances_path/snapshots
# Location where the Xen hvmloader is kept (string value)
#xen_hvmloader_path = /usr/lib/xen/boot/hvmloader
#
# Specific cache modes to use for different disk types.
#
# For example: file=directsync,block=none,network=writeback
#
# For local or direct-attached storage, it is recommended that you use
# writethrough (default) mode, as it ensures data integrity and has acceptable
# I/O performance for applications running in the guest, especially for read
# operations. However, caching mode none is recommended for remote NFS storage,
# because direct I/O operations (O_DIRECT) perform better than synchronous I/O
# operations (with O_SYNC). Caching mode none effectively turns all guest I/O
# operations into direct I/O operations on the host, which is the NFS client in
# this environment.
#
# Possible cache modes:
#
# * default: Same as writethrough.
# * none: With caching mode set to none, the host page cache is disabled, but
# the disk write cache is enabled for the guest. In this mode, the write
# performance in the guest is optimal because write operations bypass the host
# page cache and go directly to the disk write cache. If the disk write cache
# is battery-backed, or if the applications or storage stack in the guest
# transfer data properly (either through fsync operations or file system
# barriers), then data integrity can be ensured. However, because the host
# page cache is disabled, the read performance in the guest would not be as
# good as in the modes where the host page cache is enabled, such as
# writethrough mode. Shareable disk devices, like for a multi-attachable block
# storage volume, will have their cache mode set to 'none' regardless of
# configuration.
# * writethrough: writethrough mode is the default caching mode. With
# caching set to writethrough mode, the host page cache is enabled, but the
# disk write cache is disabled for the guest. Consequently, this caching mode
# ensures data integrity even if the applications and storage stack in the
# guest do not transfer data to permanent storage properly (either through
# fsync operations or file system barriers). Because the host page cache is
# enabled in this mode, the read performance for applications running in the
# guest is generally better. However, the write performance might be reduced
# because the disk write cache is disabled.
# * writeback: With caching set to writeback mode, both the host page cache
# and the disk write cache are enabled for the guest. Because of this, the
# I/O performance for applications running in the guest is good, but the data
# is not protected in a power failure. As a result, this caching mode is
# recommended only for temporary data where potential data loss is not a
# concern.
# * directsync: Like "writethrough", but it bypasses the host page cache.
# * unsafe: Caching mode of unsafe ignores cache transfer operations
# completely. As its name implies, this caching mode should be used only for
# temporary data where data loss is not a concern. This mode can be useful for
# speeding up guest installations, but you should switch to another caching
# mode in production environments.
# (list value)
#disk_cachemodes =
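#
# For example, an illustrative snippet applying the cache modes from the
# example string above (adjust to your own storage layout):
#
# [libvirt]
# disk_cachemodes = file=directsync,block=none,network=writeback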
# A path to a device that will be used as source of entropy on the host.
# Permitted options are: /dev/random or /dev/hwrng (string value)
#rng_dev_path = <None>
# For qemu or KVM guests, set this option to specify a default machine type per
# host architecture. You can find a list of supported machine types in your
# environment by checking the output of the "virsh capabilities" command. The
# format of the value for this config option is host-arch=machine-type. For
# example: x86_64=machinetype1,armv7l=machinetype2 (list value)
#hw_machine_type = <None>
# The data source used to populate the host "serial" UUID exposed to guest in
# the virtual BIOS. (string value)
# Possible values:
# none - <No description provided>
# os - <No description provided>
# hardware - <No description provided>
# auto - <No description provided>
#sysinfo_serial = auto
# Number of seconds in a memory usage statistics period. A zero or negative
# value disables memory usage statistics. (integer value)
#mem_stats_period_seconds = 10
# List of uid targets and ranges. Syntax is guest-uid:host-uid:count. Maximum
# of 5 allowed. (list value)
#uid_maps =
# List of gid targets and ranges. Syntax is guest-gid:host-gid:count. Maximum
# of 5 allowed. (list value)
#gid_maps =
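#
# For example, an illustrative mapping (values are examples only) that maps
# guest uid/gid 0 to host uid/gid 1000 for a single id each:
#
# [libvirt]
# uid_maps = 0:1000:1
# gid_maps = 0:1000:1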
# In a realtime host context, vCPUs for the guest will run at this scheduling
# priority. The priority range depends on the host kernel (usually 1-99).
# (integer value)
#realtime_scheduler_priority = 1
#
# This is a list of performance events that can be monitored. These events
# will be passed to the libvirt domain XML when creating new instances.
# Event statistics data can then be collected from libvirt. The minimum
# libvirt version is 2.0.0. For more information about `Performance monitoring
# events`, refer to https://libvirt.org/formatdomain.html#elementsPerf .
#
# Possible values:
# * A string list. For example: ``enabled_perf_events = cmt, mbml, mbmt``
# The supported events list can be found in
# https://libvirt.org/html/libvirt-libvirt-domain.html ,
# where you may need to search for the keywords ``VIR_PERF_PARAM_*``
# (list value)
#enabled_perf_events =
#
# VM Images format.
#
# If ``default`` is specified, then the ``use_cow_images`` flag is used instead
# of this one.
#
# Related options:
#
# * virt.use_cow_images
# * images_volume_group
# * [workarounds]/ensure_libvirt_rbd_instance_dir_cleanup
# (string value)
# Possible values:
# raw - <No description provided>
# flat - <No description provided>
# qcow2 - <No description provided>
# lvm - <No description provided>
# rbd - <No description provided>
# ploop - <No description provided>
# default - <No description provided>
#images_type = default
#
# LVM Volume Group that is used for VM images, when you specify images_type=lvm
#
# Related options:
#
# * images_type
# (string value)
#images_volume_group = <None>
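#
# For example, an illustrative snippet storing instance disks as LVM volumes;
# the volume group name 'nova-vg' is a placeholder, use your own:
#
# [libvirt]
# images_type = lvm
# images_volume_group = nova-vg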
#
# Create sparse logical volumes (with virtualsize) if this flag is set to True.
# (boolean value)
#sparse_logical_volumes = false
# The RADOS pool in which rbd volumes are stored (string value)
#images_rbd_pool = rbd
# Path to the ceph configuration file to use (string value)
#images_rbd_ceph_conf =
#
# Discard option for nova managed disks.
#
# Requires:
#
# * Libvirt >= 1.0.6
# * Qemu >= 1.5 (raw format)
# * Qemu >= 1.6 (qcow2 format)
# (string value)
# Possible values:
# ignore - <No description provided>
# unmap - <No description provided>
#hw_disk_discard = <None>
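#
# For example, an illustrative snippet passing discard requests through to the
# backing storage (requires the libvirt/QEMU versions listed above):
#
# [libvirt]
# hw_disk_discard = unmap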
# DEPRECATED: Allows image information files to be stored in non-standard
# locations (string value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason: Image info files are no longer used by the image cache
#image_info_filename_pattern = $instances_path/$image_cache_subdirectory_name/%(image)s.info
# Unused resized base images younger than this will not be removed (integer
# value)
#remove_unused_resized_minimum_age_seconds = 3600
# DEPRECATED: Write a checksum for files in _base to disk (boolean value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason: The image cache no longer periodically calculates checksums of stored
# images. Data integrity can be checked at the block or filesystem level.
#checksum_base_images = false
# DEPRECATED: How frequently to checksum base images (integer value)
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
# Reason: The image cache no longer periodically calculates checksums of stored
# images. Data integrity can be checked at the block or filesystem level.
#checksum_interval_seconds = 3600
#
# Method used to wipe ephemeral disks when they are deleted. Only takes effect
# if LVM is set as backing storage.
#
# Possible values:
#
# * none - do not wipe deleted volumes
# * zero - overwrite volumes with zeroes
# * shred - overwrite volume repeatedly
#
# Related options:
#
# * images_type - must be set to ``lvm``
# * volume_clear_size
# (string value)
#volume_clear = zero
#
# Size of area in MiB, counting from the beginning of the allocated volume,
# that will be cleared using method set in ``volume_clear`` option.
#
# Possible values:
#
# * 0 - clear whole volume
# * >0 - clear specified amount of MiB
#
# Related options:
#
# * images_type - must be set to ``lvm``
# * volume_clear - must be set and the value must be different than ``none``
# for this option to have any impact
# (integer value)
# Minimum value: 0
#volume_clear_size = 0
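#
# For example, an illustrative snippet that zeroes only the first 512 MiB of
# each deleted LVM-backed disk (the size shown is an example value):
#
# [libvirt]
# images_type = lvm
# volume_clear = zero
# volume_clear_size = 512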
#
# Enable snapshot compression for ``qcow2`` images.
#
# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
# snapshots to be in ``qcow2`` format, independently from their original image
# type.
#
# Related options:
#
# * snapshot_image_format
# (boolean value)
#snapshot_compression = false
# Use virtio for bridge interfaces with KVM/QEMU (boolean value)
#use_virtio_for_bridges = true
#
# Use multipath connection of the iSCSI or FC volume
#
# Volumes can be connected in libvirt as multipath devices. This will
# provide high availability and fault tolerance.
# (boolean value)
# Deprecated group/name - [libvirt]/iscsi_use_multipath
#volume_use_multipath = false
#
# Number of times to scan the given storage protocol to find a volume.
# (integer value)
# Deprecated group/name - [libvirt]/num_iscsi_scan_tries
#num_volume_scan_tries = 5
#
# Number of times to rediscover AoE target to find volume.
#
# Nova provides support for attaching block storage to hosts via AoE (ATA over
# Ethernet). This option allows the user to specify the maximum number of retry
# attempts that can be made to discover the AoE device.
# (integer value)
#num_aoe_discover_tries = 3
#
# The iSCSI transport iface to use to connect to target in case offload support
# is desired.
#
# Default format is of the form <transport_name>.<hwaddress> where
# <transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and
# <hwaddress> is the MAC address of the interface and can be generated via the
# iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be
# provided here with the actual transport name.
# (string value)
# Deprecated group/name - [libvirt]/iscsi_transport
#iscsi_iface = <None>
#
# Number of times to scan iSER target to find volume.
#
# iSER is a network protocol that extends iSCSI to use Remote Direct Memory
# Access (RDMA). This option allows the user to specify the maximum number of
# scan attempts that can be made to find an iSER volume.
# (integer value)
#num_iser_scan_tries = 5
#
# Use multipath connection of the iSER volume.
#
# iSER volumes can be connected as multipath devices. This will provide high
# availability and fault tolerance.
# (boolean value)
#iser_use_multipath = false
#
# The RADOS client name for accessing rbd (RADOS Block Device) volumes.
#
# Libvirt will refer to this user when connecting and authenticating with
# the Ceph RBD server.
# (string value)
#rbd_user = <None>
#
# The libvirt UUID of the secret for the rbd_user volumes.
# (string value)
#rbd_secret_uuid = <None>
#
# Directory where the NFS volume is mounted on the compute node.
# The default is the 'mnt' directory of the location where nova's Python
# module is installed.
#
# NFS provides shared storage for the OpenStack Block Storage service.
#
# Possible values:
#
# * A string representing absolute path of mount point.
# (string value)
#nfs_mount_point_base = $state_path/mnt
#
# Mount options passed to the NFS client. See the nfs man page for details.
#
# Mount options control the way the filesystem is mounted and how the
# NFS client behaves when accessing files on this mount point.
#
# Possible values:
#
# * Any string representing mount options separated by commas.
# * Example string: vers=3,lookupcache=pos
# (string value)
#nfs_mount_options = <None>
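#
# For example, using the example options string given above (illustrative only;
# choose options that match your NFS server):
#
# [libvirt]
# nfs_mount_options = vers=3,lookupcache=pos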
#
# Directory where the Quobyte volume is mounted on the compute node.
#
# Nova supports the Quobyte volume driver, which enables storing Block Storage
# service volumes on a Quobyte storage back end. This option specifies the
# path of the directory where the Quobyte volume is mounted.
#
# Possible values:
#
# * A string representing absolute path of mount point.
# (string value)
#quobyte_mount_point_base = $state_path/mnt
# Path to a Quobyte Client configuration file. (string value)
#quobyte_client_cfg = <None>
#
# Directory where the SMBFS shares are mounted on the compute node.
# (string value)
#smbfs_mount_point_base = $state_path/mnt
#
# Mount options passed to the SMBFS client.
#
# Provide SMBFS options as a single string containing all parameters.
# See mount.cifs man page for details. Note that the libvirt-qemu ``uid``
# and ``gid`` must be specified.
# (string value)
#smbfs_mount_options =
#
# libvirt's transport method for remote file operations.
#
# Because libvirt cannot use RPC to copy files over the network to/from other
# compute nodes, another method must be used for:
#
# * creating directory on remote host
# * creating file on remote host
# * removing file from remote host
# * copying file to remote host
# (string value)
# Possible values:
# ssh - <No description provided>
# rsync - <No description provided>
#remote_filesystem_transport = ssh
#
# Directory where the Virtuozzo Storage clusters are mounted on the compute
# node.
#
# This option defines a non-standard mountpoint for the Vzstorage cluster.
#
# Related options:
#
# * vzstorage_mount_* group of parameters
# (string value)
#vzstorage_mount_point_base = $state_path/mnt
#
# Mount owner user name.
#
# This option defines the owner user of Vzstorage cluster mountpoint.
#
# Related options:
#
# * vzstorage_mount_* group of parameters
# (string value)
#vzstorage_mount_user = stack
#
# Mount owner group name.
#
# This option defines the owner group of Vzstorage cluster mountpoint.
#
# Related options:
#
# * vzstorage_mount_* group of parameters
# (string value)
#vzstorage_mount_group = qemu
#
# Mount access mode.
#
# This option defines the access bits of the Vzstorage cluster mountpoint,
# in a format similar to that of the chmod(1) utility, for example: 0770.
# It consists of one to four digits ranging from 0 to 7, with missing
# lead digits assumed to be 0's.
#
# Related options:
#
# * vzstorage_mount_* group of parameters
# (string value)
#vzstorage_mount_perms = 0770
#
# Path to the vzstorage client log.
#
# This option defines the log of cluster operations. It should include the
# "%(cluster_name)s" template to separate logs from multiple shares.
#
# Related options:
#
# * vzstorage_mount_opts may include more detailed logging options.
# (string value)
#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz
#
# Path to the SSD cache file.
#
# You can attach an SSD drive to a client and configure the drive to store
# a local cache of frequently accessed data. By having a local cache on a
# client's SSD drive, you can increase the overall cluster performance by
# up to 10 or more times.
# WARNING! There are many SSD models which are not server grade and
# may lose an arbitrary set of data changes on power loss.
# Such SSDs should not be used in Vstorage and are dangerous as they may lead
# to data corruption and inconsistencies. Please consult the manual
# on which SSD models are known to be safe or verify it using the
# vstorage-hwflush-check(1) utility.
#
# This option defines the path, which should include the "%(cluster_name)s"
# template to separate caches from multiple shares.
#
# Related options:
#
# * vzstorage_mount_opts may include more detailed cache options.
# (string value)
#vzstorage_cache_path = <None>
#
# Extra mount options for pstorage-mount
#
# For full description of them, see
# https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
# Format is a python string representation of an argument list, like:
# "['-v', '-R', '500']"
# It should not include -c, -l, -C, -u, -g and -m as those have
# explicit vzstorage_* options.
#
# Related options:
#
# * All other vzstorage_* options
# (list value)
#vzstorage_mount_opts =
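#
# For example, an illustrative Vzstorage mount configuration reusing the
# default values shown above (adjust the user, group and permissions to your
# deployment):
#
# [libvirt]
# vzstorage_mount_user = stack
# vzstorage_mount_group = qemu
# vzstorage_mount_perms = 0770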
[matchmaker_redis]
#
# From oslo.messaging
#
# DEPRECATED: Host to locate redis. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#host = 127.0.0.1
# DEPRECATED: Use this port to connect to redis host. (port value)
# Minimum value: 0
# Maximum value: 65535
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#port = 6379
# DEPRECATED: Password for Redis server (optional). (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#password =
# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g.,
# [host:port, host1:port ... ] (list value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#sentinel_hosts =
# Redis replica set name. (string value)
#sentinel_group_name = oslo-messaging-zeromq
# Time in ms to wait between connection attempts. (integer value)
#wait_timeout = 2000
# Time in ms to wait before the transaction is killed. (integer value)
#check_timeout = 20000
# Timeout in ms on blocking socket operations. (integer value)
#socket_timeout = 10000
[metrics]
#
# Configuration options for metrics
#
# Options under this group allow you to adjust how values assigned to metrics
# are calculated.
#
# From nova.conf
#
#
# When using metrics to weight the suitability of a host, you can use this
# option to change how the calculated weight influences the weight assigned
# to a host as follows:
#
# * >1.0: increases the effect of the metric on overall weight
# * 1.0: no change to the calculated weight
# * >0.0,<1.0: reduces the effect of the metric on overall weight
# * 0.0: the metric value is ignored, and the value of the
# 'weight_of_unavailable' option is returned instead
# * >-1.0,<0.0: the effect is reduced and reversed
# * -1.0: the effect is reversed
# * <-1.0: the effect is increased proportionally and reversed
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
#
# Related options:
#
# * weight_of_unavailable
# (floating point value)
#weight_multiplier = 1.0
#
# This setting specifies the metrics to be weighed and the relative ratios for
# each metric. This should be a single string value, consisting of a series of
# one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
# of the metric to be weighed, and 'ratio' is the relative weight for that
# metric.
#
# Note that if the ratio is set to 0, the metric value is ignored, and instead
# the weight will be set to the value of the 'weight_of_unavailable' option.
#
# As an example, let's consider the case where this option is set to:
#
# ``name1=1.0, name2=-1.3``
#
# The final weight will be:
#
# ``(name1.value * 1.0) + (name2.value * -1.3)``
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * A list of zero or more key/value pairs separated by commas, where the key is
# a string representing the name of a metric and the value is a numeric weight
# for that metric. If any value is set to 0, the value is ignored and the
# weight will be set to the value of the 'weight_of_unavailable' option.
#
# Related options:
#
# * weight_of_unavailable
# (list value)
#weight_setting =
#
# This setting determines how any unavailable metrics are treated. If this
# option is set to True, any hosts for which a metric is unavailable will raise
# an exception, so it is recommended to also use the MetricFilter to filter out
# those hosts before weighing.
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * True or False, where False ensures any metric being unavailable for a host
# will set the host weight to 'weight_of_unavailable'.
#
# Related options:
#
# * weight_of_unavailable
# (boolean value)
#required = true
#
# When any of the following conditions are met, this value will be used in place
# of any actual metric value:
#
# * One of the metrics named in 'weight_setting' is not available for a host,
# and the value of 'required' is False
# * The ratio specified for a metric in 'weight_setting' is 0
# * The 'weight_multiplier' option is set to 0
#
# This option is only used by the FilterScheduler and its subclasses; if you use
# a different scheduler, this option has no effect.
#
# Possible values:
#
# * An integer or float value, where the value corresponds to the multiplier
# ratio for this weigher.
#
# Related options:
#
# * weight_setting
# * required
# * weight_multiplier
# (floating point value)
#weight_of_unavailable = -10000.0
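#
# For example, an illustrative weighing configuration using the example ratios
# from ``weight_setting`` above (the metric names are placeholders):
#
# [metrics]
# weight_multiplier = 1.0
# weight_setting = name1=1.0, name2=-1.3
# required = false
# weight_of_unavailable = -10000.0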
[mks]
#
# The Nova compute node uses WebMKS, a desktop sharing protocol, to provide
# instance console access to VMs created by VMware hypervisors.
#
# Related options:
# The following options must be set to provide console access.
# * mksproxy_base_url
# * enabled
#
# From nova.conf
#
#
# Location of the MKS web console proxy
#
# The URL in the response points to a WebMKS proxy which
# starts proxying between the client and the corresponding vCenter
# server where the instance runs. In order to use web-based
# console access, the WebMKS proxy should be installed and configured.
#
# Possible values:
#
# * Must be a valid URL of the form: ``http://host:port/`` or
# ``https://host:port/``
# (uri value)
#mksproxy_base_url = http://127.0.0.1:6090/
#
# Enables graphical console access for virtual machines.
# (boolean value)
#enabled = false
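#
# For example, an illustrative snippet enabling WebMKS console access; the
# proxy URL shown is the documented default and should point at your deployed
# WebMKS proxy:
#
# [mks]
# enabled = true
# mksproxy_base_url = http://127.0.0.1:6090/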
[neutron]
#
# Configuration options for neutron (network connectivity as a service).
#
# From nova.conf
#
# DEPRECATED:
# This option specifies the URL for connecting to Neutron.
#
# Possible values:
#
# * Any valid URL that points to the Neutron API service is appropriate here.
# This typically matches the URL returned for the 'network' service type
# from the Keystone service catalog.
# (uri value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
# Adapter configuration options. In the current release, "url" will override
# this behavior, but will be ignored and/or removed in a future release. To
# achieve the same result, use the endpoint_override option instead.
#url = http://127.0.0.1:9696
#
# Default name for the Open vSwitch integration bridge.
#
# Specifies the name of an integration bridge interface used by Open vSwitch.
# This option is only used if Neutron does not specify the OVS bridge name in
# port binding responses.
# (string value)
#ovs_bridge = br-int
#
# Default name for the floating IP pool.
#
# Specifies the name of the floating IP pool used for allocating floating IPs.
# This option is only used if Neutron does not specify the floating IP pool
# name in port binding responses.
# (string value)
#default_floating_pool = nova
#
# Integer value representing the number of seconds to wait before querying
# Neutron for extensions. After this number of seconds the next time Nova
# needs to create a resource in Neutron it will requery Neutron for the
# extensions that it has loaded. Setting value to 0 will refresh the
# extensions with no wait.
# (integer value)
# Minimum value: 0
#extension_sync_interval = 600
#
# Number of times neutronclient should retry on any failed http call.
#
# 0 means the connection is attempted only once. Setting it to any positive
# integer means that on failure the connection is retried that many times,
# e.g. setting it to 3 means the total number of connection attempts will be 4.
#
# Possible values:
#
# * Any integer value. 0 means connection is attempted only once
# (integer value)
# Minimum value: 0
#http_retries = 3
#
# When set to True, this option indicates that Neutron will be used to proxy
# metadata requests and resolve instance ids. Otherwise, the instance ID must be
# passed to the metadata request in the 'X-Instance-ID' header.
#
# Related options:
#
# * metadata_proxy_shared_secret
# (boolean value)
#service_metadata_proxy = false
#
# This option holds the shared secret string used to validate metadata requests
# proxied by Neutron. In order to be used, the
# 'X-Metadata-Provider-Signature' header must be supplied in the request.
#
# Related options:
#
# * service_metadata_proxy
# (string value)
#metadata_proxy_shared_secret =
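#
# For example, an illustrative snippet enabling the Neutron metadata proxy;
# the secret value is a placeholder and must match the one configured in
# Neutron:
#
# [neutron]
# service_metadata_proxy = true
# metadata_proxy_shared_secret = NEUTRON_METADATA_SECRET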
# PEM encoded Certificate Authority to use when verifying HTTPS connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [neutron]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [neutron]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# Tenant ID (string value)
#tenant_id = <None>
# Tenant Name (string value)
#tenant_name = <None>
# The default service_type for endpoint URL discovery. (string value)
#service_type = network
# The default service_name for endpoint URL discovery. (string value)
#service_name = <None>
# List of interfaces, in order of preference, for endpoint URL. (list value)
#valid_interfaces = internal,public
# The default region_name for endpoint URL discovery. (string value)
#region_name = <None>
# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
#endpoint_override = <None>
[notifications]
#
# Most of the actions in Nova which manipulate the system state generate
# notifications which are posted to the messaging component (e.g. RabbitMQ) and
# can be consumed by any service outside of OpenStack. More technical details
# at https://docs.openstack.org/nova/latest/reference/notifications.html
#
# From nova.conf
#
#
# If set, send compute.instance.update notifications on
# instance state changes.
#
# Please refer to
# https://docs.openstack.org/nova/latest/reference/notifications.html for
# additional information on notifications.
#
# Possible values:
#
# * None - no notifications
# * "vm_state" - notifications are sent with VM state transition information in
# the ``old_state`` and ``state`` fields. The ``old_task_state`` and
# ``new_task_state`` fields will be set to the current task_state of the
# instance.
# * "vm_and_task_state" - notifications are sent with VM and task state
# transition information.
# (string value)
#notify_on_state_change = <None>
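#
# For example, an illustrative snippet emitting notifications with both VM and
# task state transition information:
#
# [notifications]
# notify_on_state_change = vm_and_task_state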
# Default notification level for outgoing notifications. (string value)
# Possible values:
# DEBUG - <No description provided>
# INFO - <No description provided>
# WARN - <No description provided>
# ERROR - <No description provided>
# CRITICAL - <No description provided>
# Deprecated group/name - [DEFAULT]/default_notification_level
#default_level = INFO
# DEPRECATED:
# Default publisher_id for outgoing notifications. If you consider routing
# notifications using a different publisher, change this value accordingly.
#
# Possible values:
#
# * Defaults to the current hostname of this host, but it can be any valid
# oslo.messaging publisher_id
#
# Related options:
#
# * host - Hostname, FQDN or IP address of this host.
# (string value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This option is only used when ``monkey_patch=True`` and
# ``monkey_patch_modules`` is configured to specify the legacy notify_decorator.
# Since the monkey_patch and monkey_patch_modules options are deprecated, this
# option is also deprecated.
#default_publisher_id = $host
#
# Specifies which notification format shall be used by nova.
#
# The default value is fine for most deployments and rarely needs to be changed.
# This value can be set to 'versioned' once the infrastructure moves closer to
# consuming the newer format of notifications. After this occurs, this option
# will be removed.
#
# Note that notifications can be completely disabled by setting ``driver=noop``
# in the ``[oslo_messaging_notifications]`` group.
#
# Possible values:
# * unversioned: Only the legacy unversioned notifications are emitted.
# * versioned: Only the new versioned notifications are emitted.
# * both: Both the legacy unversioned and the new versioned notifications are
# emitted. (Default)
#
# The list of versioned notifications is visible in
# https://docs.openstack.org/nova/latest/reference/notifications.html
# (string value)
#notification_format = both
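#
# For example, an illustrative snippet emitting only the new versioned
# notifications (to disable notifications entirely, set ``driver = noop`` in
# the ``[oslo_messaging_notifications]`` group instead, as noted above):
#
# [notifications]
# notification_format = versioned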
#
# Specifies the topics for the versioned notifications issued by nova.
#
# The default value is fine for most deployments and rarely needs to be changed.
# However, if you have a third-party service that consumes versioned
# notifications, it might be worth getting a topic for that service.
# Nova will send a message containing a versioned notification payload to each
# topic queue in this list.
#
# The list of versioned notifications is visible in
# https://docs.openstack.org/nova/latest/reference/notifications.html
# (list value)
#versioned_notifications_topics = versioned_notifications
#
# If enabled, include block device information in the versioned notification
# payload. Sending block device information is disabled by default as providing
# that information can incur some overhead on the system since the information
# may need to be loaded from the database.
# (boolean value)
#bdms_in_notifications = false
[osapi_v21]
#
# From nova.conf
#
# DEPRECATED:
# This option is a string representing a regular expression (regex) that matches
# the project_id as contained in URLs. If not set, it will match normal UUIDs
# created by keystone.
#
# Possible values:
#
# * A string representing any legal regular expression
# (string value)
# This option is deprecated for removal since 13.0.0.
# Its value may be silently ignored in the future.
# Reason:
# Recent versions of nova constrain project IDs to hexadecimal characters and
# dashes. If your installation uses IDs outside of this range, you should use
# this option to provide your own regex and give you time to migrate offending
# projects to valid IDs before the next release.
#project_id_regex = <None>
[oslo_concurrency]
#
# From oslo.concurrency
#
# Enables or disables inter-process locks. (boolean value)
#disable_process_locking = false
# Directory to use for lock files. For security, the specified directory should
# only be writable by the user running the processes that need locking. Defaults
# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock
# path must be set. (string value)
#lock_path = <None>
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# Name for the AMQP container. Must be globally unique. Defaults to a generated
# UUID (string value)
#container_name = <None>
# Timeout for inactive connections (in seconds) (integer value)
#idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
#trace = false
# Attempt to connect via SSL. If no other ssl-related parameters are given, it
# will use the system's CA-bundle to verify the server's certificate. (boolean
# value)
#ssl = false
# CA certificate PEM file used to verify the server's certificate (string value)
#ssl_ca_file =
# Self-identifying certificate PEM file for client authentication (string value)
#ssl_cert_file =
# Private key PEM file used to sign ssl_cert_file certificate (optional) (string
# value)
#ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
#ssl_key_password = <None>
# By default SSL checks that the name in the server's certificate matches the
# hostname in the transport_url. In some configurations it may be preferable to
# use the virtual hostname instead, for example if the server uses the Server
# Name Indication TLS extension (rfc6066) to provide a certificate per virtual
# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the
# virtual host name instead of the DNS name. (boolean value)
#ssl_verify_vhost = false
# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Not applicable - not a SSL server
#allow_insecure_clients = false
# Space separated list of acceptable SASL mechanisms (string value)
#sasl_mechanisms =
# Path to directory that contains the SASL configuration (string value)
#sasl_config_dir =
# Name of configuration file (without .conf suffix) (string value)
#sasl_config_name =
# SASL realm to use if no realm present in username (string value)
#sasl_default_realm =
# DEPRECATED: User name for message broker authentication (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Should use configuration option transport_url to provide the username.
#username =
# DEPRECATED: Password for message broker authentication (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Should use configuration option transport_url to provide the password.
#password =
# Seconds to pause before attempting to re-connect. (integer value)
# Minimum value: 1
#connection_retry_interval = 1
# Increase the connection_retry_interval by this many seconds after each
# unsuccessful failover attempt. (integer value)
# Minimum value: 0
#connection_retry_backoff = 2
# Maximum limit for connection_retry_interval + connection_retry_backoff
# (integer value)
# Minimum value: 1
#connection_retry_interval_max = 30
# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
# recoverable error. (integer value)
# Minimum value: 1
#link_retry_delay = 10
# The maximum number of attempts to re-send a reply message which failed due to
# a recoverable error. (integer value)
# Minimum value: -1
#default_reply_retry = 0
# The deadline for an rpc reply message delivery. (integer value)
# Minimum value: 5
#default_reply_timeout = 30
# The deadline for an rpc cast or call message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_send_timeout = 30
# The deadline for a sent notification message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_notify_timeout = 30
# The duration to schedule a purge of idle sender links. Detach link after
# expiry. (integer value)
# Minimum value: 1
#default_sender_link_timeout = 600
# Indicates the addressing mode used by the driver.
# Permitted values:
# 'legacy' - use legacy non-routable addressing
# 'routable' - use routable addresses
# 'dynamic' - use legacy addresses if the message bus does not support routing
# otherwise use routable addressing (string value)
#addressing_mode = dynamic
# Enable virtual host support for those message buses that do not natively
# support virtual hosting (such as qpidd). When set to true the virtual host
# name will be added to all message bus addresses, effectively creating a
# private 'subnet' per virtual host. Set to False if the message bus supports
# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
# as the name of the virtual host. (boolean value)
#pseudo_vhost = true
# address prefix used when sending to a specific server (string value)
#server_request_prefix = exclusive
# address prefix used when broadcasting to all servers (string value)
#broadcast_prefix = broadcast
# address prefix when sending to any server in group (string value)
#group_request_prefix = unicast
# Address prefix for all generated RPC addresses (string value)
#rpc_address_prefix = openstack.org/om/rpc
# Address prefix for all generated Notification addresses (string value)
#notify_address_prefix = openstack.org/om/notify
# Appended to the address prefix when sending a fanout message. Used by the
# message bus to identify fanout messages. (string value)
#multicast_address = multicast
# Appended to the address prefix when sending to a particular RPC/Notification
# server. Used by the message bus to identify messages sent to a single
# destination. (string value)
#unicast_address = unicast
# Appended to the address prefix when sending to a group of consumers. Used by
# the message bus to identify messages that should be delivered in a round-robin
# fashion across consumers. (string value)
#anycast_address = anycast
# Exchange name used in notification addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_notification_exchange if set
# else control_exchange if set
# else 'notify' (string value)
#default_notification_exchange = <None>
# Exchange name used in RPC addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_rpc_exchange if set
# else control_exchange if set
# else 'rpc' (string value)
#default_rpc_exchange = <None>
# Window size for incoming RPC Reply messages. (integer value)
# Minimum value: 1
#reply_link_credit = 200
# Window size for incoming RPC Request messages (integer value)
# Minimum value: 1
#rpc_server_credit = 100
# Window size for incoming Notification messages (integer value)
# Minimum value: 1
#notify_server_credit = 100
# Send messages of this type pre-settled.
# Pre-settled messages will not receive acknowledgement
# from the peer. Note well: pre-settled messages may be
# silently discarded if the delivery fails.
# Permitted values:
# 'rpc-call' - send RPC Calls pre-settled
# 'rpc-reply'- send RPC Replies pre-settled
# 'rpc-cast' - Send RPC Casts pre-settled
# 'notify' - Send Notifications pre-settled
# (multi valued)
#pre_settled = rpc-cast
#pre_settled = rpc-reply
[oslo_messaging_kafka]
#
# From oslo.messaging
#
# DEPRECATED: Default Kafka broker Host (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#kafka_default_host = localhost
# DEPRECATED: Default Kafka broker Port (port value)
# Minimum value: 0
# Maximum value: 65535
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#kafka_default_port = 9092
# Max fetch bytes of Kafka consumer (integer value)
#kafka_max_fetch_bytes = 1048576
# Default timeout(s) for Kafka consumers (floating point value)
#kafka_consumer_timeout = 1.0
# DEPRECATED: Pool Size for Kafka Consumers (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#pool_size = 10
# DEPRECATED: The pool size limit for connections expiration policy (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_min_size = 2
# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_ttl = 1200
# Group id for Kafka consumer. Consumers in one group will coordinate message
# consumption (string value)
#consumer_group = oslo_messaging_consumer
# Upper bound on the delay for KafkaProducer batching in seconds (floating point
# value)
#producer_batch_timeout = 0.0
# Size of batch for the producer async send (integer value)
#producer_batch_size = 16384
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The driver(s) to handle sending notifications. Possible values are messaging,
# messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
# The maximum number of attempts to re-send a notification message which failed
# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
# (integer value)
#retry = -1
[oslo_messaging_rabbit]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_durable_queues
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
#amqp_auto_delete = false
# Enable SSL (boolean value)
#ssl = <None>
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
#ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
#ssl_key_file =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
#ssl_cert_file =
# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
#ssl_ca_file =
# How long to wait before reconnecting in response to an AMQP consumer cancel
# notification. (floating point value)
#kombu_reconnect_delay = 1.0
# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
# be used. This option may not be available in future versions. (string value)
#kombu_compression = <None>
# How long to wait for a missing client before abandoning sending it its
# replies. This value should not be longer than rpc_response_timeout. (integer
# value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
#kombu_missing_consumer_retry_timeout = 60
# Determines how the next RabbitMQ node is chosen in case the one we are
# currently connected to becomes unavailable. Takes effect only if more than one
# RabbitMQ node is provided in config. (string value)
# Possible values:
# round-robin - <No description provided>
# shuffle - <No description provided>
#kombu_failover_strategy = round-robin
# DEPRECATED: The RabbitMQ broker address where a single node is used. (string
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_host = localhost
# DEPRECATED: The RabbitMQ broker port where a single node is used. (port value)
# Minimum value: 0
# Maximum value: 65535
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_port = 5672
# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_hosts = $rabbit_host:$rabbit_port
# DEPRECATED: The RabbitMQ userid. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_userid = guest
# DEPRECATED: The RabbitMQ password. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_password = guest
# The RabbitMQ login method. (string value)
# Possible values:
# PLAIN - <No description provided>
# AMQPLAIN - <No description provided>
# RABBIT-CR-DEMO - <No description provided>
#rabbit_login_method = AMQPLAIN
# DEPRECATED: The RabbitMQ virtual host. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Replaced by [DEFAULT]/transport_url
#rabbit_virtual_host = /
# How frequently to retry connecting with RabbitMQ. (integer value)
#rabbit_retry_interval = 1
# How long to back off between retries when connecting to RabbitMQ. (integer
# value)
#rabbit_retry_backoff = 2
# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
# (integer value)
#rabbit_interval_max = 30
# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#rabbit_max_retries = 0
# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
# is no longer controlled by the x-ha-policy argument when declaring a queue. If
# you just want to make sure that all queues (except those with auto-generated
# names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA
# '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
#rabbit_ha_queues = false
# Positive integer representing duration in seconds for queue TTL (x-expires).
# Queues which are unused for the duration of the TTL are automatically deleted.
# The parameter affects only reply and fanout queues. (integer value)
# Minimum value: 1
#rabbit_transient_queues_ttl = 1800
# Specifies the number of messages to prefetch. Setting to zero allows unlimited
# messages. (integer value)
#rabbit_qos_prefetch_count = 0
# Number of seconds after which the Rabbit broker is considered down if the
# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer
# value)
#heartbeat_timeout_threshold = 60
# How many times during the heartbeat_timeout_threshold we check the heartbeat.
# (integer value)
#heartbeat_rate = 2
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
#fake_rabbit = false
# Maximum number of channels to allow (integer value)
#channel_max = <None>
# The maximum byte size for an AMQP frame (integer value)
#frame_max = <None>
# How often to send heartbeats for consumer's connections (integer value)
#heartbeat_interval = 3
# Arguments passed to ssl.wrap_socket (dict value)
#ssl_options = <None>
# Set socket timeout in seconds for connection's socket (floating point value)
#socket_timeout = 0.25
# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point value)
#tcp_user_timeout = 0.25
# Set delay for reconnection to some host which has connection error (floating
# point value)
#host_connection_reconnect_delay = 0.25
# Connection factory implementation (string value)
# Possible values:
# new - <No description provided>
# single - <No description provided>
# read_write - <No description provided>
#connection_factory = single
# Maximum number of connections to keep queued. (integer value)
#pool_max_size = 30
# Maximum number of connections to create above `pool_max_size`. (integer value)
#pool_max_overflow = 0
# Default number of seconds to wait for a connection to be available (integer
# value)
#pool_timeout = 30
# Lifetime of a connection (since creation) in seconds or None for no recycling.
# Expired connections are closed on acquire. (integer value)
#pool_recycle = 600
# Threshold at which inactive (since release) connections are considered stale
# in seconds or None for no staleness. Stale connections are closed on acquire.
# (integer value)
#pool_stale = 60
# Default serialization mechanism for serializing/deserializing
# outgoing/incoming messages (string value)
# Possible values:
# json - <No description provided>
# msgpack - <No description provided>
#default_serializer_type = json
# Persist notification messages. (boolean value)
#notification_persistence = false
# Exchange name for sending notifications (string value)
#default_notification_exchange = ${control_exchange}_notification
# Max number of unacknowledged messages which RabbitMQ can send to the
# notification listener. (integer value)
#notification_listener_prefetch_count = 100
# Reconnecting retry count in case of connectivity problem during sending
# notification, -1 means infinite retry. (integer value)
#default_notification_retry_attempts = -1
# Reconnecting retry delay in case of connectivity problem during sending
# notification message (floating point value)
#notification_retry_delay = 0.25
# Time to live for rpc queues without consumers in seconds. (integer value)
#rpc_queue_expiration = 60
# Exchange name for sending RPC messages (string value)
#default_rpc_exchange = ${control_exchange}_rpc
# Exchange name for receiving RPC replies (string value)
#rpc_reply_exchange = ${control_exchange}_rpc_reply
# Max number of unacknowledged messages which RabbitMQ can send to the rpc
# listener. (integer value)
#rpc_listener_prefetch_count = 100
# Max number of unacknowledged messages which RabbitMQ can send to the rpc
# reply listener. (integer value)
#rpc_reply_listener_prefetch_count = 100
# Reconnecting retry count in case of connectivity problem during sending reply.
# -1 means infinite retry during rpc_timeout (integer value)
#rpc_reply_retry_attempts = -1
# Reconnecting retry delay in case of connectivity problem during sending reply.
# (floating point value)
#rpc_reply_retry_delay = 0.25
# Reconnecting retry count in case of connectivity problem during sending RPC
# message, -1 means infinite retry. If the actual number of retry attempts is
# not 0, the rpc request could be processed more than once (integer value)
#default_rpc_retry_attempts = -1
# Reconnecting retry delay in case of connectivity problem during sending RPC
# message (floating point value)
#rpc_retry_delay = 0.25
[oslo_messaging_zmq]
#
# From oslo.messaging
#
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address. (string value)
#rpc_zmq_bind_address = *
# MatchMaker driver. (string value)
# Possible values:
# redis - <No description provided>
# sentinel - <No description provided>
# dummy - <No description provided>
#rpc_zmq_matchmaker = redis
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts = 1
# Maximum number of ingress messages to locally buffer per topic. Default is
# unlimited. (integer value)
#rpc_zmq_topic_backlog = <None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir = /var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
# "host" option, if running Nova. (string value)
#rpc_zmq_host = localhost
# Number of seconds to wait before all pending messages will be sent after
# closing a socket. The default value of -1 specifies an infinite linger period.
# The value of 0 specifies no linger period. Pending messages shall be discarded
# immediately when the socket is closed. Positive values specify an upper bound
# for the linger period. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
#zmq_linger = -1
# The default number of seconds that poll should wait. Poll raises a timeout
# exception when the timeout expires. (integer value)
#rpc_poll_timeout = 1
# Expiration timeout in seconds of a name service record about existing target (
# < 0 means no timeout). (integer value)
#zmq_target_expire = 300
# Update period in seconds of a name service record about existing target.
# (integer value)
#zmq_target_update = 180
# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
# value)
#use_pub_sub = false
# Use ROUTER remote proxy. (boolean value)
#use_router_proxy = false
# This option makes direct connections dynamic or static. It makes sense only
# with use_router_proxy=False which means to use direct connections for direct
# message types (ignored otherwise). (boolean value)
#use_dynamic_connections = false
# How many additional connections to a host will be made for failover reasons.
# This option only applies in dynamic connections mode. (integer value)
#zmq_failover_connections = 2
# Minimal port number for random ports range. (port value)
# Minimum value: 0
# Maximum value: 65535
#rpc_zmq_min_port = 49153
# Maximal port number for random ports range. (integer value)
# Minimum value: 1
# Maximum value: 65536
#rpc_zmq_max_port = 65536
# Number of retries to find free port number before fail with ZMQBindError.
# (integer value)
#rpc_zmq_bind_port_retries = 100
# Default serialization mechanism for serializing/deserializing
# outgoing/incoming messages (string value)
# Possible values:
# json - <No description provided>
# msgpack - <No description provided>
#rpc_zmq_serialization = json
# This option configures round-robin mode in zmq socket. True means not keeping
# a queue when the server side disconnects. False means to keep the queue and
# messages even if the server is disconnected; when the server reappears we send
# all accumulated messages to it. (boolean value)
#zmq_immediate = true
# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
# other negative value) means to skip any overrides and leave it to OS default;
# 0 and 1 (or any other positive value) mean to disable and enable the option
# respectively. (integer value)
#zmq_tcp_keepalive = -1
# The duration between two keepalive transmissions in idle condition. The unit
# is platform dependent, for example, seconds in Linux, milliseconds in Windows
# etc. The default value of -1 (or any other negative value and 0) means to skip
# any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_idle = -1
# The number of retransmissions to be carried out before declaring that remote
# end is not available. The default value of -1 (or any other negative value and
# 0) means to skip any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_cnt = -1
# The duration between two successive keepalive retransmissions, if
# acknowledgement to the previous keepalive transmission is not received. The
# unit is platform dependent, for example, seconds in Linux, milliseconds in
# Windows etc. The default value of -1 (or any other negative value and 0) means
# to skip any overrides and leave it to OS default. (integer value)
#zmq_tcp_keepalive_intvl = -1
# Maximum number of (green) threads to work concurrently. (integer value)
#rpc_thread_pool_size = 100
# Expiration timeout in seconds of a sent/received message after which it is not
# tracked anymore by a client/server. (integer value)
#rpc_message_ttl = 300
# Wait for message acknowledgements from receivers. This mechanism works only
# via proxy without PUB/SUB. (boolean value)
#rpc_use_acks = false
# Number of seconds to wait for an ack from a cast/call. After each retry
# attempt this timeout is multiplied by some specified multiplier. (integer
# value)
#rpc_ack_timeout_base = 15
# Number to multiply base ack timeout by after each retry attempt. (integer
# value)
#rpc_ack_timeout_multiplier = 2
# Default number of message sending attempts in case of any problems occurred:
# positive value N means at most N retries, 0 means no retries, None or -1 (or
# any other negative values) mean to retry forever. This option is used only if
# acknowledgments are enabled. (integer value)
#rpc_retry_attempts = 3
# List of publisher hosts SubConsumer can subscribe on. This option has higher
# priority than the default publishers list taken from the matchmaker. (list
# value)
#subscribe_on =
[oslo_middleware]
#
# From oslo.middleware
#
# The maximum body size for each request, in bytes. (integer value)
# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
# Deprecated group/name - [DEFAULT]/max_request_body_size
#max_request_body_size = 114688
# DEPRECATED: The HTTP Header that will be used to determine what the original
# request protocol scheme was, even if it was hidden by a SSL termination proxy.
# (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#secure_proxy_ssl_header = X-Forwarded-Proto
# Whether the application is behind a proxy or not. This determines if the
# middleware should parse the headers or not. (boolean value)
#enable_proxy_headers_parsing = false
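#
# A minimal sketch (not a recommendation): if the API runs behind a
# TLS-terminating load balancer or proxy that sets the standard forwarded
# headers, a deployment might enable header parsing in this section:
#
#     enable_proxy_headers_parsing = true
#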
[oslo_policy]
#
# From oslo.policy
#
# This option controls whether or not to enforce scope when evaluating policies.
# If ``True``, the scope of the token used in the request is compared to the
# ``scope_types`` of the policy being enforced. If the scopes do not match, an
# ``InvalidScope`` exception will be raised. If ``False``, a message will be
# logged informing operators that policies are being invoked with mismatching
# scope. (boolean value)
#enforce_scope = false
# The file that defines policies. (string value)
#policy_file = policy.json
# Default rule. Enforced when a requested rule is not found. (string value)
#policy_default_rule = default
# Directories where policy configuration files are stored. They can be relative
# to any directory in the search path defined by the config_dir option, or
# absolute paths. The file defined by policy_file must exist for these
# directories to be searched. Missing or empty directories are ignored. (multi
# valued)
#policy_dirs = policy.d
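#
# For illustration only (the override directory path is hypothetical): to load
# policy overrides from a file plus additional drop-in directories, the
# multi-valued option can be repeated:
#
#     policy_file = policy.json
#     policy_dirs = policy.d
#     policy_dirs = /etc/nova/policy-overrides.d
#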
# Content Type to send and receive data for REST based policy check (string
# value)
# Possible values:
# application/x-www-form-urlencoded - <No description provided>
# application/json - <No description provided>
#remote_content_type = application/x-www-form-urlencoded
# server identity verification for REST based policy check (boolean value)
#remote_ssl_verify_server_crt = false
# Absolute path to ca cert file for REST based policy check (string value)
#remote_ssl_ca_crt_file = <None>
# Absolute path to client cert for REST based policy check (string value)
#remote_ssl_client_crt_file = <None>
# Absolute path to client key file for REST based policy check (string value)
#remote_ssl_client_key_file = <None>
[pci]
#
# From nova.conf
#
#
# An alias for a PCI passthrough device requirement.
#
# This allows users to specify the alias in the extra specs for a flavor,
# without needing to repeat all the PCI property requirements.
#
# Possible Values:
#
# * A list of JSON values which describe the aliases. For example::
#
# alias = {
# "name": "QuickAssist",
# "product_id": "0443",
# "vendor_id": "8086",
# "device_type": "type-PCI",
# "numa_policy": "required"
# }
#
# This defines an alias for the Intel QuickAssist card. (multi valued). Valid
# key values are :
#
# ``name``
# Name of the PCI alias.
#
# ``product_id``
# Product ID of the device in hexadecimal.
#
# ``vendor_id``
# Vendor ID of the device in hexadecimal.
#
# ``device_type``
# Type of PCI device. Valid values are: ``type-PCI``, ``type-PF`` and
# ``type-VF``.
#
# ``numa_policy``
# Required NUMA affinity of device. Valid values are: ``legacy``,
# ``preferred`` and ``required``.
# (multi valued)
# Deprecated group/name - [DEFAULT]/pci_alias
#alias =
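#
# As a sketch (the second alias and its IDs are purely illustrative), aliases
# are given as single-line JSON values and the option can be repeated to define
# several of them:
#
#     alias = {"name": "QuickAssist", "product_id": "0443", "vendor_id": "8086", "device_type": "type-PCI"}
#     alias = {"name": "a1", "product_id": "154c", "vendor_id": "8086", "device_type": "type-VF"}
#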
#
# White list of PCI devices available to VMs.
#
# Possible values:
#
# * A JSON dictionary which describe a whitelisted PCI device. It should take
# the following format:
#
# ["vendor_id": "<id>",] ["product_id": "<id>",]
# ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
# "devname": "<name>",]
# {"<tag>": "<tag_value>",}
#
# Where '[' indicates zero or one occurrences, '{' indicates zero or multiple
# occurrences, and '|' indicates mutually exclusive options. Note that any
# missing fields are automatically wildcarded.
#
# Valid key values are :
#
# * "vendor_id": Vendor ID of the device in hexadecimal.
# * "product_id": Product ID of the device in hexadecimal.
# * "address": PCI address of the device.
# * "devname": Device name of the device (for e.g. interface name). Not all
# PCI devices have a name.
# * "<tag>": Additional <tag> and <tag_value> used for matching PCI devices.
# Supported <tag>: "physical_network".
#
# The address key supports traditional glob style and regular expression
# syntax. Valid examples are:
#
# passthrough_whitelist = {"devname":"eth0",
# "physical_network":"physnet"}
# passthrough_whitelist = {"address":"*:0a:00.*"}
# passthrough_whitelist = {"address":":0a:00.",
# "physical_network":"physnet1"}
# passthrough_whitelist = {"vendor_id":"1137",
# "product_id":"0071"}
# passthrough_whitelist = {"vendor_id":"1137",
# "product_id":"0071",
# "address": "0000:0a:00.1",
# "physical_network":"physnet1"}
# passthrough_whitelist = {"address":{"domain": ".*",
# "bus": "02", "slot": "01",
# "function": "[2-7]"},
# "physical_network":"physnet1"}
# passthrough_whitelist = {"address":{"domain": ".*",
# "bus": "02", "slot": "0[1-2]",
# "function": ".*"},
# "physical_network":"physnet1"}
#
# The following are invalid, as they specify mutually exclusive options:
#
# passthrough_whitelist = {"devname":"eth0",
# "physical_network":"physnet",
# "address":"*:0a:00.*"}
#
# * A JSON list of JSON dictionaries corresponding to the above format. For
# example:
#
# passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
# {"product_id":"0002", "vendor_id":"8086"}]
# (multi valued)
# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist
#passthrough_whitelist =
[placement]
#
# From nova.conf
#
# DEPRECATED:
# Region name of this node. This is used when picking the URL in the service
# catalog.
#
# Possible values:
#
# * Any string representing region name
# (string value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
# Adapter configuration options. Use the region_name option instead.
#os_region_name = <None>
# DEPRECATED:
# Endpoint interface for this node. This is used when picking the URL in the
# service catalog.
# (string value)
# This option is deprecated for removal since 17.0.0.
# Its value may be silently ignored in the future.
# Reason: Endpoint lookup uses the service catalog via common keystoneauth1
# Adapter configuration options. Use the valid_interfaces option instead.
#os_interface = <None>
#
# If True, when limiting allocation candidate results, the results will be
# a random sampling of the full result set. If False, allocation candidates
# are returned in a deterministic but undefined order. That is, all things
# being equal, two requests for allocation candidates will return the same
# results in the same order; but no guarantees are made as to how that order
# is determined.
# (boolean value)
#randomize_allocation_candidates = false
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [placement]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [placement]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# Tenant ID (string value)
#tenant_id = <None>
# Tenant Name (string value)
#tenant_name = <None>
# The default service_type for endpoint URL discovery. (string value)
#service_type = placement
# The default service_name for endpoint URL discovery. (string value)
#service_name = <None>
# List of interfaces, in order of preference, for endpoint URL. (list value)
# Deprecated group/name - [placement]/os_interface
#valid_interfaces = internal,public
# The default region_name for endpoint URL discovery. (string value)
# Deprecated group/name - [placement]/os_region_name
#region_name = <None>
# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
#endpoint_override = <None>
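#
# A typical password-auth layout for this section might look like the sketch
# below; the URL, project, user, password and region are placeholders to be
# replaced with deployment-specific values:
#
#     auth_type = password
#     auth_url = http://controller:5000/v3
#     project_name = service
#     project_domain_name = Default
#     username = placement
#     user_domain_name = Default
#     password = PLACEMENT_PASS
#     region_name = RegionOne
#     valid_interfaces = internal
#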
[quota]
#
# Quota options allow to manage quotas in openstack deployment.
#
# From nova.conf
#
#
# The number of instances allowed per project.
#
# Possible Values
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_instances
#instances = 10
#
# The number of instance cores or vCPUs allowed per project.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_cores
#cores = 20
#
# The number of megabytes of instance RAM allowed per project.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_ram
#ram = 51200
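#
# For example (values are illustrative only), to allow each project 50
# instances, 100 vCPUs and 200 GiB of RAM, the three options above could be
# set to:
#
#     instances = 50
#     cores = 100
#     ram = 204800
#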
# DEPRECATED:
# The number of floating IPs allowed per project.
#
# Floating IPs are not allocated to instances by default. Users need to select
# them from the pool configured by the OpenStack administrator to attach to
# their instances.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_floating_ips
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#floating_ips = 10
# DEPRECATED:
# The number of fixed IPs allowed per project.
#
# Unlike floating IPs, fixed IPs are allocated dynamically by the network
# component when instances boot up. This quota value should be at least the
# number of instances allowed.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_fixed_ips
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#fixed_ips = -1
#
# The number of metadata items allowed per instance.
#
# Users can associate metadata with an instance during instance creation. This
# metadata takes the form of key-value pairs.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_metadata_items
#metadata_items = 128
#
# The number of injected files allowed.
#
# File injection allows users to customize the personality of an instance by
# injecting data into it upon boot. Only text file injection is permitted:
# binary or ZIP files are not accepted. During file injection, any existing
# files that match the specified files are renamed to include a ``.bak``
# extension appended with a timestamp.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_injected_files
#injected_files = 5
#
# The number of bytes allowed per injected file.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes
#injected_file_content_bytes = 10240
#
# The maximum allowed injected file path length.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length
#injected_file_path_length = 255
# DEPRECATED:
# The number of security groups per project.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_security_groups
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#security_groups = 10
# DEPRECATED:
# The number of security rules per security group.
#
# The associated rules in each security group control the traffic to instances
# in the group.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_security_group_rules
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# nova-network is deprecated, as are any related configuration options.
#security_group_rules = 20
#
# The maximum number of key pairs allowed per user.
#
# Users can create at least one key pair for each project and use the key pair
# for multiple instances that belong to that project.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_key_pairs
#key_pairs = 100
#
# The maximum number of server groups per project.
#
# Server groups are used to control the affinity and anti-affinity scheduling
# policy for a group of servers or instances. Reducing the quota will not affect
# any existing group, but new servers will not be allowed into groups that have
# become over quota.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_server_groups
#server_groups = 10
#
# The maximum number of servers per server group.
#
# Possible values:
#
# * A positive integer or 0.
# * -1 to disable the quota.
# (integer value)
# Minimum value: -1
# Deprecated group/name - [DEFAULT]/quota_server_group_members
#server_group_members = 10
#
# The number of seconds until a reservation expires.
#
# This quota represents the time period for invalidating quota reservations.
# (integer value)
#reservation_expire = 86400
#
# The count of reservations until usage is refreshed.
#
# This defaults to 0 (off) to avoid additional load but it is useful to turn on
# to help keep quota usage up-to-date and reduce the impact of out of sync usage
# issues.
# (integer value)
# Minimum value: 0
#until_refresh = 0
#
# The number of seconds between subsequent usage refreshes.
#
# This defaults to 0 (off) to avoid additional load but it is useful to turn on
# to help keep quota usage up-to-date and reduce the impact of out of sync usage
# issues. Note that quotas are not updated on a periodic task, they will update
# on a new reservation if max_age has passed since the last reservation.
# (integer value)
# Minimum value: 0
#max_age = 0
# DEPRECATED:
# The quota enforcer driver.
#
# Provides abstraction for quota checks. Users can configure a specific
# driver to use for quota checks.
#
# Possible values:
#
# * nova.quota.DbQuotaDriver (default) or any string representing fully
# qualified class name.
# (string value)
# Deprecated group/name - [DEFAULT]/quota_driver
# This option is deprecated for removal since 14.0.0.
# Its value may be silently ignored in the future.
#driver = nova.quota.DbQuotaDriver
#
# Recheck quota after resource creation to prevent allowing quota to be
# exceeded.
#
# This defaults to True (recheck quota after resource creation) but can be set
# to False to avoid additional load if allowing quota to be exceeded because of
# racing requests is considered acceptable. For example, when set to False, if a
# user makes highly parallel REST API requests to create servers, it will be
# possible for them to create more servers than their allowed quota during the
# race. If their quota is 10 servers, they might be able to create 50 during the
# burst. After the burst, they will not be able to create any more servers but
# they will be able to keep their 50 servers until they delete them.
#
# The initial quota check is done before resources are created, so if multiple
# parallel requests arrive at the same time, all could pass the quota check and
# create resources, potentially exceeding quota. When recheck_quota is True,
# quota will be checked a second time after resources have been created and if
# the resource is over quota, it will be deleted and OverQuota will be raised,
# usually resulting in a 403 response to the REST API user. This makes it
# impossible for a user to exceed their quota with the caveat that it will,
# however, be possible for a REST API user to be rejected with a 403 response in
# the event of a collision close to reaching their quota limit, even if the user
# has enough quota available when they made the request.
# (boolean value)
#recheck_quota = true
[rdp]
#
# Options under this group enable and configure Remote Desktop Protocol (
# RDP) related features.
#
# This group is only relevant to Hyper-V users.
#
# From nova.conf
#
#
# Enable Remote Desktop Protocol (RDP) related features.
#
# Hyper-V, unlike the majority of the hypervisors employed on Nova compute
# nodes, uses RDP instead of VNC and SPICE as a desktop sharing protocol to
# provide instance console access. This option enables RDP for graphical
# console access for virtual machines created by Hyper-V.
#
# **Note:** RDP should only be enabled on compute nodes that support the Hyper-V
# virtualization platform.
#
# Related options:
#
# * ``compute_driver``: Must be hyperv.
#
# (boolean value)
#enabled = false
#
# The URL an end user would use to connect to the RDP HTML5 console proxy.
# The console proxy service is called with this token-embedded URL and
# establishes the connection to the proper instance.
#
# An RDP HTML5 console proxy service will need to be configured to listen on the
# address configured here. Typically the console proxy service would be run on a
# controller node. The localhost address used as default would only work in a
# single node environment i.e. devstack.
#
# An RDP HTML5 proxy allows a user to access via the web the text or graphical
# console of any Windows server or workstation using RDP. RDP HTML5 console
# proxy services include FreeRDP, wsgate.
# See https://github.com/FreeRDP/FreeRDP-WebConnect
#
# Possible values:
#
# * <scheme>://<ip-address>:<port-number>/
#
# The scheme must be identical to the scheme configured for the RDP HTML5
# console proxy service. It is ``http`` or ``https``.
#
# The IP address must be identical to the address on which the RDP HTML5
# console proxy service is listening.
#
# The port must be identical to the port on which the RDP HTML5 console proxy
# service is listening.
#
# Related options:
#
# * ``rdp.enabled``: Must be set to ``True`` for ``html5_proxy_base_url`` to be
# effective.
# (uri value)
#html5_proxy_base_url = http://127.0.0.1:6083/
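#
# A minimal sketch (the hostname is a placeholder) for a Hyper-V deployment
# running an RDP HTML5 proxy such as FreeRDP-WebConnect on a controller node:
#
#     enabled = true
#     html5_proxy_base_url = https://controller.example.com:6083/
#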
[remote_debug]
#
# From nova.conf
#
# (The sample for the 'host' option, a host address value, could not be
# auto-generated.)
#
# Debug port to connect to. This command line parameter allows you to specify
# the port you want to use to connect to a nova service via a debugger running
# on a different host.
#
# Note that using the remote debug option changes how Nova uses the eventlet
# library to support async IO. This could result in failures that do not occur
# under normal operation. Use at your own risk.
#
# Possible Values:
#
# * Port number you want to use as a command line parameter
# to a nova service. For Example:
#
# /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
# --remote_debug-host <IP address where the debugger is running>
# --remote_debug-port <port it's listening on>.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#port = <None>
[scheduler]
#
# From nova.conf
#
#
# The scheduler host manager to use.
#
# The host manager manages the in-memory picture of the hosts that the scheduler
# uses. The options values are chosen from the entry points under the namespace
# 'nova.scheduler.host_manager' in 'setup.cfg'.
#
# NOTE: The "ironic_host_manager" option is deprecated as of the 17.0.0 Queens
# release.
# (string value)
# Possible values:
# host_manager - <No description provided>
# ironic_host_manager - <No description provided>
# Deprecated group/name - [DEFAULT]/scheduler_host_manager
#host_manager = host_manager
#
# The class of the driver used by the scheduler. This should be chosen from one
# of the entrypoints under the namespace 'nova.scheduler.driver' of file
# 'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is
# used.
#
# Other options are:
#
# * 'caching_scheduler' which aggressively caches the system state for better
# individual scheduler performance at the risk of more retries when running
# multiple schedulers. [DEPRECATED]
# * 'chance_scheduler' which simply picks a host at random. [DEPRECATED]
# * 'fake_scheduler' which is used for testing.
#
# Possible values:
#
# * Any of the drivers included in Nova:
# ** filter_scheduler
# ** caching_scheduler
# ** chance_scheduler
# ** fake_scheduler
# * You may also set this to the entry point name of a custom scheduler driver,
# but you will be responsible for creating and maintaining it in your
# setup.cfg
# file.
# (string value)
# Deprecated group/name - [DEFAULT]/scheduler_driver
#driver = filter_scheduler
#
# Periodic task interval.
#
# This value controls how often (in seconds) to run periodic tasks in the
# scheduler. The specific tasks that are run for each period are determined by
# the particular scheduler being used.
#
# If this is larger than the nova-service 'service_down_time' setting, Nova may
# report the scheduler service as down. This is because the scheduler driver is
# responsible for sending a heartbeat and it will only do that as often as this
# option allows. As each scheduler can work a little differently than the
# others, be sure to test this with your selected scheduler.
#
# Possible values:
#
# * An integer, where the integer corresponds to periodic task interval in
# seconds. 0 uses the default interval (60 seconds). A negative value disables
# periodic tasks.
#
# Related options:
#
# * ``nova-service service_down_time``
# (integer value)
# Deprecated group/name - [DEFAULT]/scheduler_driver_task_period
#periodic_task_interval = 60
#
# This is the maximum number of attempts that will be made for a given instance
# build/move operation. It limits the number of alternate hosts returned by the
# scheduler. When that list of hosts is exhausted, a MaxRetriesExceeded
# exception is raised and the instance is set to an error state.
#
# Possible values:
#
# * A positive integer, where the integer corresponds to the max number of
# attempts that can be made when building or moving an instance.
# (integer value)
# Minimum value: 1
# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
#max_attempts = 3
#
# Periodic task interval.
#
# This value controls how often (in seconds) the scheduler should attempt
# to discover new hosts that have been added to cells. If negative (the
# default), no automatic discovery will occur.
#
# Deployments where compute nodes come and go frequently may want this
# enabled, where others may prefer to manually discover hosts when one
# is added to avoid any overhead from constantly checking. If enabled,
# each run will select any unmapped hosts out of each cell database.
# (integer value)
# Minimum value: -1
#discover_hosts_in_cells_interval = -1
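#
# For example (the interval is illustrative), to discover newly added compute
# hosts automatically every five minutes instead of running ``nova-manage
# cell_v2 discover_hosts`` by hand:
#
#     discover_hosts_in_cells_interval = 300
#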
#
# This setting determines the maximum limit on results received from the
# placement service during a scheduling operation. It effectively limits
# the number of hosts that may be considered for scheduling requests that
# match a large number of candidates.
#
# A value of 1 (the minimum) will effectively defer scheduling to the placement
# service strictly on "will it fit" grounds. A higher value will put an upper
# cap on the number of results the scheduler will consider during the filtering
# and weighing process. Large deployments may need to set this lower than the
# total number of hosts available to limit memory consumption, network traffic,
# etc. of the scheduler.
#
# This option is only used by the FilterScheduler; if you use a different
# scheduler, this option has no effect.
# (integer value)
# Minimum value: 1
#max_placement_results = 1000
[serial_console]
#
# The serial console feature allows you to connect to a guest in case a
# graphical console like VNC, RDP or SPICE is not available. This is only
# currently supported for the libvirt, Ironic and Hyper-V drivers.
#
# From nova.conf
#
#
# Enable the serial console feature.
#
# In order to use this feature, the service ``nova-serialproxy`` needs to run.
# This service is typically executed on the controller node.
# (boolean value)
#enabled = false
#
# A range of TCP ports a guest can use for its backend.
#
# Each instance which gets created will use one port out of this range. If the
# range is not big enough to provide another port for a new instance, this
# instance won't get launched.
#
# Possible values:
#
# * Each string which passes the regex ``\d+:\d+`` For example ``10000:20000``.
# Be sure that the first port number is lower than the second port number
# and that both are in range from 0 to 65535.
# (string value)
#port_range = 10000:20000
#
# The URL an end user would use to connect to the ``nova-serialproxy`` service.
#
# The ``nova-serialproxy`` service is called with this token enriched URL
# and establishes the connection to the proper instance.
#
# Related options:
#
# * The IP address must be identical to the address to which the
# ``nova-serialproxy`` service is listening (see option ``serialproxy_host``
# in this section).
# * The port must be the same as in the option ``serialproxy_port`` of this
# section.
# * If you choose to use a secured websocket connection, then start this option
# with ``wss://`` instead of the unsecured ``ws://``. The options ``cert``
# and ``key`` in the ``[DEFAULT]`` section have to be set for that.
# (uri value)
#base_url = ws://127.0.0.1:6083/
#
# The IP address to which proxy clients (like ``nova-serialproxy``) should
# connect to get the serial console of an instance.
#
# This is typically the IP address of the host of a ``nova-compute`` service.
# (string value)
#proxyclient_address = 127.0.0.1
#
# The IP address which is used by the ``nova-serialproxy`` service to listen
# for incoming requests.
#
# The ``nova-serialproxy`` service listens on this IP address for incoming
# connection requests to instances which expose serial console.
#
# Related options:
#
# * Ensure that this is the same IP address which is defined in the option
# ``base_url`` of this section or use ``0.0.0.0`` to listen on all addresses.
# (string value)
#serialproxy_host = 0.0.0.0
#
# The port number which is used by the ``nova-serialproxy`` service to listen
# for incoming requests.
#
# The ``nova-serialproxy`` service listens on this port number for incoming
# connection requests to instances which expose serial console.
#
# Related options:
#
# * Ensure that this is the same port number which is defined in the option
# ``base_url`` of this section.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#serialproxy_port = 6083
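#
# A sketch of enabling the serial console (all addresses are placeholders): on
# the compute node set ``enabled``, ``port_range`` and ``proxyclient_address``;
# on the controller running ``nova-serialproxy`` set ``base_url`` and the proxy
# listen options, for example:
#
#     enabled = true
#     port_range = 10000:20000
#     base_url = ws://controller.example.com:6083/
#     proxyclient_address = 192.0.2.10
#     serialproxy_host = 0.0.0.0
#     serialproxy_port = 6083
#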
[service_user]
#
# Configuration options for service to service authentication using a service
# token. These options allow sending a service token along with the user's token
# when contacting external REST APIs.
#
# From nova.conf
#
#
# When True, if sending a user token to a REST API, also send a service token.
#
# Nova often reuses the user token provided to the nova-api to talk to other
# REST APIs, such as Cinder, Glance and Neutron. It is possible that while the
# user token was valid when the request was made to Nova, the token may expire
# before it reaches the other service. To avoid any failures, and to make it
# clear it is Nova calling the service on the user's behalf, we include a
# service token along with the user token. Should the user's token have
# expired, a valid service token ensures the REST API request will still be
# accepted by the keystone middleware.
# (boolean value)
#send_service_user_token = false
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [service_user]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [service_user]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# Tenant ID (string value)
#tenant_id = <None>
# Tenant Name (string value)
#tenant_name = <None>
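#
# An illustrative sketch (URL, names and password are placeholders) of sending
# a service token alongside the user token:
#
#     send_service_user_token = true
#     auth_type = password
#     auth_url = http://controller:5000/v3
#     project_name = service
#     project_domain_name = Default
#     username = nova
#     user_domain_name = Default
#     password = NOVA_PASS
#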
[spice]
#
# The SPICE console feature allows you to connect to a guest virtual machine.
# SPICE is a replacement for the fairly limited VNC protocol.
#
# The following requirements must be met in order to use SPICE:
#
# * Virtualization driver must be libvirt
# * spice.enabled set to True
# * vnc.enabled set to False
# * update html5proxy_base_url
# * update server_proxyclient_address
#
# From nova.conf
#
#
# Enable SPICE related features.
#
# Related options:
#
# * VNC must be explicitly disabled to get access to the SPICE console. Set the
# enabled option to False in the [vnc] section to disable the VNC console.
# (boolean value)
#enabled = false
#
# Enable the SPICE guest agent support on the instances.
#
# The Spice agent works with the Spice protocol to offer a better guest console
# experience. However, the Spice console can still be used without the Spice
# Agent. With the Spice agent installed the following features are enabled:
#
# * Copy & Paste of text and images between the guest and client machine
# * Automatic adjustment of resolution when the client screen changes - e.g.
# if you make the Spice console full screen the guest resolution will adjust
# to match it rather than letterboxing.
# * Better mouse integration - The mouse can be captured and released without
# needing to click inside the console or press keys to release it. The
# performance of mouse movement is also improved.
# (boolean value)
#agent_enabled = true
#
# Location of the SPICE HTML5 console proxy.
#
# End users would use this URL to connect to the ``nova-spicehtml5proxy``
# service. This service will forward request to the console of an instance.
#
# In order to use SPICE console, the service ``nova-spicehtml5proxy`` should be
# running. This service is typically launched on the controller node.
#
# Possible values:
#
# * Must be a valid URL of the form: ``http://host:port/spice_auto.html``
# where host is the node running ``nova-spicehtml5proxy`` and the port is
# typically 6082. Consider not using default value as it is not well defined
# for any real deployment.
#
# Related options:
#
# * This option depends on ``html5proxy_host`` and ``html5proxy_port`` options.
# The access URL returned by the compute node must have the host
# and port where the ``nova-spicehtml5proxy`` service is listening.
# (uri value)
#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
#
# The address where the SPICE server running on the instances should listen.
#
# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the controller
# node and connects over the private network to this address on the compute
# node(s).
#
# Possible values:
#
# * IP address to listen on.
# (string value)
#server_listen = 127.0.0.1
#
# The address used by ``nova-spicehtml5proxy`` client to connect to instance
# console.
#
# Typically, the ``nova-spicehtml5proxy`` proxy client runs on the
# controller node and connects over the private network to this address on the
# compute node(s).
#
# Possible values:
#
# * Any valid IP address on the compute node.
#
# Related options:
#
# * This option depends on the ``server_listen`` option.
# The proxy client must be able to access the address specified in
# ``server_listen`` using the value of this option.
# (string value)
#server_proxyclient_address = 127.0.0.1
#
# A keyboard layout which is supported by the underlying hypervisor on this
# node.
#
# Possible values:
# * This is usually an 'IETF language tag' (default is 'en-us'). If you
# use QEMU as hypervisor, you should find the list of supported keyboard
# layouts at /usr/share/qemu/keymaps.
# (string value)
#keymap = en-us
# (The sample for the 'html5proxy_host' option, a host address value, could not
# be auto-generated.)
#
# Port on which the ``nova-spicehtml5proxy`` service listens for incoming
# requests.
#
# Related options:
#
# * This option depends on the ``html5proxy_base_url`` option.
# The ``nova-spicehtml5proxy`` service must be listening on a port that is
# accessible from the HTML5 client.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#html5proxy_port = 6082
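#
# Tying the requirements listed at the top of this section together, an
# illustrative compute-node sketch (addresses are placeholders) could be:
#
#     enabled = true
#     agent_enabled = true
#     html5proxy_base_url = http://controller.example.com:6082/spice_auto.html
#     server_listen = 0.0.0.0
#     server_proxyclient_address = 192.0.2.10
#
# with ``enabled = false`` in the ``[vnc]`` section.
#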
[upgrade_levels]
#
# upgrade_levels options are used to set version cap for RPC
# messages sent between different nova services.
#
# By default all services send messages using the latest version
# they know about.
#
# The compute upgrade level is an important part of rolling upgrades
# where old and new nova-compute services run side by side.
#
# The other options can largely be ignored, and are only kept to
# help with a possible future backport issue.
#
# From nova.conf
#
#
# Compute RPC API version cap.
#
# By default, we always send messages using the most recent version
# the client knows about.
#
# Where you have old and new compute services running, you should set
# this to the lowest deployed version. This is to guarantee that all
# services never send messages that one of the compute nodes can't
# understand. Note that we only support upgrading from release N to
# release N+1.
#
# Set this option to "auto" if you want to let the compute RPC module
# automatically determine what version to use based on the service
# versions in the deployment.
#
# Possible values:
#
# * By default send the latest version the client knows about
# * 'auto': Automatically determines what version to use based on
# the service versions in the deployment.
# * A string representing a version number in the format 'N.N';
# for example, possible values might be '1.12' or '2.0'.
# * An OpenStack release name, in lower case, such as 'mitaka' or
# 'liberty'.
# (string value)
#compute = <None>
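#
# For example, during a rolling upgrade a deployment might pin the compute RPC
# API to the older release until all nodes are upgraded (the release name shown
# is illustrative), or let the cap be negotiated automatically:
#
#     compute = pike
#
# or:
#
#     compute = auto
#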
# Cells RPC API version cap (string value)
#cells = <None>
# Intercell RPC API version cap (string value)
#intercell = <None>
# Cert RPC API version cap (string value)
#cert = <None>
# Scheduler RPC API version cap (string value)
#scheduler = <None>
# Conductor RPC API version cap (string value)
#conductor = <None>
# Console RPC API version cap (string value)
#console = <None>
# Consoleauth RPC API version cap (string value)
#consoleauth = <None>
# Network RPC API version cap (string value)
#network = <None>
# Base API RPC API version cap (string value)
#baseapi = <None>
[vault]
#
# From nova.conf
#
# root token for vault (string value)
#root_token_id = <None>
# Use this endpoint to connect to Vault, for example: "http://127.0.0.1:8200"
# (string value)
#vault_url = http://127.0.0.1:8200
# Absolute path to ca cert file (string value)
#ssl_ca_crt_file = <None>
# SSL Enabled/Disabled (boolean value)
#use_ssl = false
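#
# An illustrative sketch (URL and CA path are placeholders) for a TLS-enabled
# Vault endpoint:
#
#     vault_url = https://vault.example.com:8200
#     use_ssl = true
#     ssl_ca_crt_file = /etc/nova/vault-ca.pem
#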
[vendordata_dynamic_auth]
#
# Options within this group control the authentication of the vendordata
# subsystem of the metadata API server (and config drive) with external systems.
#
# From nova.conf
#
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile = <None>
# PEM encoded client certificate cert file (string value)
#certfile = <None>
# PEM encoded client certificate key file (string value)
#keyfile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Timeout value for http requests (integer value)
#timeout = <None>
# Authentication type to load (string value)
# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin
#auth_type = <None>
# Config Section from which to load plugin specific options (string value)
#auth_section = <None>
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
#project_id = <None>
# Project name to scope to (string value)
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# Trust ID (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User ID (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [vendordata_dynamic_auth]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
# Tenant ID (string value)
#tenant_id = <None>
# Tenant Name (string value)
#tenant_name = <None>
[vmware]
#
# Related options:
# The following options must be set in order to launch VMware-based
# virtual machines.
#
# * compute_driver: Must use vmwareapi.VMwareVCDriver.
# * vmware.host_username
# * vmware.host_password
# * vmware.cluster_name
#
# From nova.conf
#
#
# This option specifies the physical ethernet adapter name for VLAN
# networking.
#
# Set the vlan_interface configuration option to match the ESX host
# interface that handles VLAN-tagged VM traffic.
#
# Possible values:
#
# * Any valid string representing VLAN interface name
# (string value)
#vlan_interface = vmnic0
#
# This option should be configured only when using the NSX-MH Neutron
# plugin. This is the name of the integration bridge on the ESXi server
# or host. This should not be set for any other Neutron plugin. Hence
# the default value is not set.
#
# Possible values:
#
# * Any valid string representing the name of the integration bridge
# (string value)
#integration_bridge = <None>
#
# Set this value if affected by an increased network latency causing
# repeated characters when typing in a remote console.
# (integer value)
# Minimum value: 0
#console_delay_seconds = <None>
#
# Identifies the remote system where the serial port traffic will
# be sent.
#
# This option adds a virtual serial port which sends console output to
# a configurable service URI. At the service URI address there will be a
# virtual serial port concentrator that will collect console logs.
# If this is not set, no serial ports will be added to the created VMs.
#
# Possible values:
#
# * Any valid URI
# (string value)
#serial_port_service_uri = <None>
#
# Identifies a proxy service that provides network access to the
# serial_port_service_uri.
#
# Possible values:
#
# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
#
# Related options:
# This option is ignored if serial_port_service_uri is not specified.
# * serial_port_service_uri
# (uri value)
#serial_port_proxy_uri = <None>
#
# Specifies the directory where the Virtual Serial Port Concentrator is
# storing console log files. It should match the 'serial_log_dir' config
# value of VSPC.
# (string value)
#serial_log_dir = /opt/vmware/vspc
# (The sample for the 'host_ip' option, a host address value, could not be
# auto-generated.)
# Port for connection to VMware vCenter host. (port value)
# Minimum value: 0
# Maximum value: 65535
#host_port = 443
# Username for connection to VMware vCenter host. (string value)
#host_username = <None>
# Password for connection to VMware vCenter host. (string value)
#host_password = <None>
#
# Specifies the CA bundle file to be used in verifying the vCenter
# server certificate.
# (string value)
#ca_file = <None>
#
# If true, the vCenter server certificate is not verified. If false,
# then the default CA truststore is used for verification.
#
# Related options:
# * ca_file: If "ca_file" is set, this "insecure" option is ignored.
# (boolean value)
#insecure = false
# Name of a VMware Cluster ComputeResource. (string value)
#cluster_name = <None>
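#
# A minimal connection sketch matching the related options listed at the top of
# this section (the address, credentials and cluster name are placeholders;
# ``compute_driver`` itself is set in the ``[DEFAULT]`` section):
#
#     host_ip = vcenter.example.com
#     host_username = administrator@vsphere.local
#     host_password = VMWARE_PASS
#     cluster_name = NovaCluster
#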
#
# Regular expression pattern to match the name of datastore.
#
# The datastore_regex setting specifies the datastores to use with
# Compute. For example, datastore_regex="nas.*" selects all the data
# stores that have a name starting with "nas".
#
# NOTE: If no regex is given, it just picks the datastore with the
# most free space.
#
# Possible values:
#
# * Any matching regular expression to a datastore must be given
# (string value)
#datastore_regex = <None>
#
# Time interval in seconds to poll remote tasks invoked on
# VMware VC server.
# (floating point value)
#task_poll_interval = 0.5
#
# Number of times VMware vCenter server API must be retried on connection
# failures, e.g. socket error, etc.
# (integer value)
# Minimum value: 0
#api_retry_count = 10
#
# This option specifies VNC starting port.
#
# Every VM created by an ESX host has an option of enabling a VNC client
# for remote connection. The 'vnc_port' option sets the default starting
# port for the VNC client.
#
# Possible values:
#
# * Any valid port number within 5900 - (5900 + vnc_port_total)
#
# Related options:
# Below options should be set to enable VNC client.
# * vnc.enabled = True
# * vnc_port_total
# (port value)
# Minimum value: 0
# Maximum value: 65535
#vnc_port = 5900
#
# Total number of VNC ports.
# (integer value)
# Minimum value: 0
#vnc_port_total = 10000
#
# This option enables/disables the use of linked clone.
#
# The ESX hypervisor requires a copy of the VMDK file in order to boot
# up a virtual machine. The compute driver must download the VMDK via
# HTTP from the OpenStack Image service to a datastore that is visible
# to the hypervisor and cache it. Subsequent virtual machines that need
# the VMDK use the cached version and don't have to copy the file again
# from the OpenStack Image service.
#
# If set to false, even with a cached VMDK, there is still a copy
# operation from the cache location to the hypervisor file directory
# in the shared datastore. If set to true, the above copy operation
# is avoided as it creates copy of the virtual machine that shares
# virtual disks with its parent VM.
# (boolean value)
#use_linked_clone = true
#
# This option sets the HTTP connection pool size.
#
# The connection pool size is the maximum number of connections from nova to
# vSphere. It should only be increased if there are warnings indicating that
# the connection pool is full; otherwise, the default should suffice.
# (integer value)
# Minimum value: 10
#connection_pool_size = 10
#
# This option enables or disables storage policy based placement
# of instances.
#
# Related options:
#
# * pbm_default_policy
# (boolean value)
#pbm_enabled = false
#
# This option specifies the PBM service WSDL file location URL.
#
# Setting this will disable storage policy based placement
# of instances.
#
# Possible values:
#
# * Any valid file path
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl
# (string value)
#pbm_wsdl_location = <None>
#
# This option specifies the default policy to be used.
#
# If pbm_enabled is set and there is no defined storage policy for the
# specific request, then this policy will be used.
#
# Possible values:
#
# * Any valid storage policy such as VSAN default storage policy
#
# Related options:
#
# * pbm_enabled
# (string value)
#pbm_default_policy = <None>
#
# This option specifies the limit on the maximum number of objects to
# return in a single result.
#
# A positive value will cause the operation to suspend the retrieval
# when the count of objects reaches the specified limit. The server may
# still limit the count to something less than the configured value.
# Any remaining objects may be retrieved with additional requests.
# (integer value)
# Minimum value: 0
#maximum_objects = 100
#
# This option adds a prefix to the folder where cached images are stored.
#
# This is not the full path - just a folder prefix. This should only be
# used when a datastore cache is shared between compute nodes.
#
# Note: This should only be used when the compute nodes are running on same
# host or they have a shared file system.
#
# Possible values:
#
# * Any string representing the cache prefix to the folder
# (string value)
#cache_prefix = <None>
[vnc]
#
# Virtual Network Computing (VNC) can be used to provide remote desktop
# console access to instances for tenants and/or administrators.
#
# From nova.conf
#
#
# Enable VNC related features.
#
# Guests will get created with graphical devices to support this. Clients
# (for example Horizon) can then establish a VNC connection to the guest.
# (boolean value)
# Deprecated group/name - [DEFAULT]/vnc_enabled
#enabled = true
#
# Keymap for VNC.
#
# The keyboard mapping (keymap) determines which keyboard layout a VNC
# session should use by default.
#
# Possible values:
#
# * A keyboard layout which is supported by the underlying hypervisor on
# this node. This is usually an 'IETF language tag' (for example
# 'en-us'). If you use QEMU as hypervisor, you should find the list
# of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
# (string value)
# Deprecated group/name - [DEFAULT]/vnc_keymap
#keymap = en-us
# (The samples for the 'server_listen' and 'server_proxyclient_address' options,
# both host address values, could not be auto-generated.)
#
# Public address of noVNC VNC console proxy.
#
# The VNC proxy is an OpenStack component that enables compute service
# users to access their instances through VNC clients. noVNC provides
# VNC support through a websocket-based client.
#
# This option sets the public base URL to which client systems will
# connect. noVNC clients can use this address to connect to the noVNC
# instance and, by extension, the VNC sessions.
#
# Related options:
#
# * novncproxy_host
# * novncproxy_port
# (uri value)
#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
# (The sample for the 'xvpvncproxy_host' option, a host address value, could not
# be auto-generated.)
#
# Port that the XVP VNC console proxy should bind to.
#
# The VNC proxy is an OpenStack component that enables compute service
# users to access their instances through VNC clients. Xen provides
# the Xenserver VNC Proxy, or XVP, as an alternative to the
# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
# XVP clients are Java-based.
#
# This option sets the private port to which the XVP VNC console proxy
# service should bind.
#
# Related options:
#
# * xvpvncproxy_host
# * xvpvncproxy_base_url
# (port value)
# Minimum value: 0
# Maximum value: 65535
#xvpvncproxy_port = 6081
#
# Public URL address of XVP VNC console proxy.
#
# The VNC proxy is an OpenStack component that enables compute service
# users to access their instances through VNC clients. Xen provides
# the Xenserver VNC Proxy, or XVP, as an alternative to the
# websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
# XVP clients are Java-based.
#
# This option sets the public base URL to which client systems will
# connect. XVP clients can use this address to connect to the XVP
# instance and, by extension, the VNC sessions.
#
# Related options:
#
# * xvpvncproxy_host
# * xvpvncproxy_port
# (uri value)
#xvpvncproxy_base_url = http://127.0.0.1:6081/console
#
# IP address that the noVNC console proxy should bind to.
#
# The VNC proxy is an OpenStack component that enables compute service
# users to access their instances through VNC clients. noVNC provides
# VNC support through a websocket-based client.
#
# This option sets the private address to which the noVNC console proxy
# service should bind.
#
# Related options:
#
# * novncproxy_port
# * novncproxy_base_url
# (string value)
#novncproxy_host = 0.0.0.0
#
# Port that the noVNC console proxy should bind to.
#
# The VNC proxy is an OpenStack component that enables compute service
# users to access their instances through VNC clients. noVNC provides
# VNC support through a websocket-based client.
#
# This option sets the private port to which the noVNC console proxy
# service should bind.
#
# Related options:
#
# * novncproxy_host
# * novncproxy_base_url
# (port value)
# Minimum value: 0
# Maximum value: 65535
#novncproxy_port = 6080
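#
# An illustrative sketch (addresses are placeholders) of a noVNC setup, on the
# controller running ``nova-novncproxy`` and on the compute nodes respectively:
#
#     # controller
#     novncproxy_host = 0.0.0.0
#     novncproxy_port = 6080
#     novncproxy_base_url = http://controller.example.com:6080/vnc_auto.html
#
#     # compute node
#     server_listen = 0.0.0.0
#     server_proxyclient_address = 192.0.2.10
#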
#
# The authentication schemes to use with the compute node.
#
# Control what RFB authentication schemes are permitted for connections between
# the proxy and the compute host. If multiple schemes are enabled, the first
# matching scheme will be used, thus the strongest schemes should be listed
# first.
#
# Possible values:
#
# * ``none``: allow connection without authentication
# * ``vencrypt``: use VeNCrypt authentication scheme
#
# Related options:
#
# * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must also be
# set
# (list value)
#auth_schemes = none
# The path to the client key PEM file (for x509)
#
# The fully qualified path to a PEM file containing the private key which the
# VNC proxy server presents to the compute node during VNC authentication.
#
# Related options:
#
# * ``vnc.auth_schemes``: must include ``vencrypt``
# * ``vnc.vencrypt_client_cert``: must also be set
# (string value)
#vencrypt_client_key = <None>
# The path to the client certificate PEM file (for x509)
#
# The fully qualified path to a PEM file containing the x509 certificate which
# the VNC proxy server presents to the compute node during VNC authentication.
#
# Realted options:
#
# * ``vnc.auth_schemes``: must include ``vencrypt``
# * ``vnc.vencrypt_client_key``: must also be set
# (string value)
#vencrypt_client_cert = <None>
# The path to the CA certificate PEM file
#
# The fully qualified path to a PEM file containing one or more x509
# certificates for the certificate authorities used by the compute node VNC
# server.
#
# Related options:
#
# * ``vnc.auth_schemes``: must include ``vencrypt``
# (string value)
#vencrypt_ca_certs = <None>
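#
# Example (illustrative only; the certificate and key paths are assumptions):
# to require VeNCrypt between the proxy and the compute hosts, while still
# permitting unauthenticated connections as a fallback, the options above
# could be combined as follows (strongest scheme listed first):
#
#     [vnc]
#     auth_schemes = vencrypt,none
#     vencrypt_client_key = /etc/pki/nova-novncproxy/client-key.pem
#     vencrypt_client_cert = /etc/pki/nova-novncproxy/client-cert.pem
#     vencrypt_ca_certs = /etc/pki/nova-novncproxy/ca-cert.pem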
[workarounds]
#
# A collection of workarounds used to mitigate bugs or issues found in system
# tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These
# should only be enabled in exceptional circumstances. All options are linked
# against bug IDs, where more information on the issue can be found.
#
# From nova.conf
#
#
# Use sudo instead of rootwrap.
#
# Allow fallback to sudo for performance reasons.
#
# For more information, refer to the bug report:
#
# https://bugs.launchpad.net/nova/+bug/1415106
#
# Possible values:
#
# * True: Use sudo instead of rootwrap
# * False: Use rootwrap as usual
#
# Related options:
#
# * Any options that affect 'rootwrap' will be ignored.
# (boolean value)
#disable_rootwrap = false
#
# Disable live snapshots when using the libvirt driver.
#
# Live snapshots allow the snapshot of the disk to happen without an
# interruption to the guest, using coordination with a guest agent to
# quiesce the filesystem.
#
# When using libvirt 1.2.2, live snapshots fail intermittently under load
# (likely related to concurrent libvirt/qemu operations). This config
# option provides a mechanism to disable live snapshots, in favor of cold
# snapshots, while this is resolved. A cold snapshot causes an instance
# outage while the guest is going through the snapshotting process.
#
# For more information, refer to the bug report:
#
# https://bugs.launchpad.net/nova/+bug/1334398
#
# Possible values:
#
# * True: Live snapshot is disabled when using libvirt
# * False: Live snapshots are always used when snapshotting (as long as
# there is a new enough libvirt and the backend storage supports it)
# (boolean value)
#disable_libvirt_livesnapshot = false
#
# Enable handling of events emitted from compute drivers.
#
# Many compute drivers emit lifecycle events, which are events that occur when,
# for example, an instance is starting or stopping. If the instance is going
# through task state changes due to an API operation, like resize, the events
# are ignored.
#
# This is an advanced feature which allows the hypervisor to signal to the
# compute service that an unexpected state change has occurred in an instance
# and that the instance can be shut down automatically. Unfortunately, this can
# race in some conditions, for example during reboot operations or when the
# compute service or the host is restarted (planned or due to an outage). If
# such races are common, then it is advisable to disable this feature.
#
# Care should be taken when this feature is disabled and
# 'sync_power_state_interval' is set to a negative value. In this case, any
# instances that get out of sync between the hypervisor and the Nova database
# will have to be synchronized manually.
#
# For more information, refer to the bug report:
#
# https://bugs.launchpad.net/bugs/1444630
#
# Related options:
#
# * If ``sync_power_state_interval`` is negative and this feature is disabled,
# then instances that get out of sync between the hypervisor and the Nova
# database will have to be synchronized manually.
# (boolean value)
#handle_virt_lifecycle_events = true
#
# Disable the server group policy check upcall in compute.
#
# In order to detect races with server group affinity policy, the compute
# service attempts to validate that the policy was not violated by the
# scheduler. It does this by making an upcall to the API database to list
# the instances in the server group for the instance that it is booting, which
# violates our api/cell isolation goals. Eventually this will be solved by
# proper affinity guarantees in the scheduler and placement service, but until
# then, this late check is needed to ensure proper affinity policy.
#
# Operators that desire api/cell isolation over this check should
# enable this flag, which will avoid making that upcall from compute.
#
# Related options:
#
# * [filter_scheduler]/track_instance_changes also relies on upcalls from the
# compute service to the scheduler service.
# (boolean value)
#disable_group_policy_check_upcall = false
#
# Ensure the instance directory is removed during clean up when using rbd.
#
# When enabled, this workaround will ensure that the instance directory is always
# removed during cleanup on hosts using ``[libvirt]/images_type=rbd``. This
# avoids the following bugs with evacuation and revert resize clean up that lead
# to the instance directory remaining on the host:
#
# https://bugs.launchpad.net/nova/+bug/1414895
#
# https://bugs.launchpad.net/nova/+bug/1761062
#
# Both of these bugs can then result in ``DestinationDiskExists`` errors being
# raised if the instances ever attempt to return to the host.
#
# .. warning:: Operators will need to ensure that the instance directory itself,
#    specified by ``[DEFAULT]/instances_path``, is not shared between computes
#    before enabling this workaround, otherwise the console.log, kernels,
#    ramdisks and any additional files being used by the running instance will
#    be lost.
#
# Related options:
#
# * ``compute_driver`` (libvirt)
# * ``[libvirt]/images_type`` (rbd)
# * ``instances_path``
# (boolean value)
#ensure_libvirt_rbd_instance_dir_cleanup = false
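#
# Example (illustrative only): a libvirt deployment backed by rbd whose
# ``instances_path`` is local to each compute host could enable the cleanup
# workaround described above with:
#
#     [workarounds]
#     ensure_libvirt_rbd_instance_dir_cleanup = true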
#
# Enable live migration of instances with NUMA topologies.
#
# Live migration of instances with NUMA topologies is disabled by default
# when using the libvirt driver. This includes live migration of instances with
# CPU pinning or hugepages. CPU pinning and huge page information for such
# instances is not currently re-calculated, as noted in bug #1289064. This
# means that if instances were already present on the destination host, the
# migrated instance could be placed on the same dedicated cores as these
# instances or use hugepages allocated for another instance. Alternately, if the
# host platforms were not homogeneous, the instance could be assigned to
# non-existent cores or be inadvertently split across host NUMA nodes.
#
# Despite these known issues, there may be cases where live migration is
# necessary. By enabling this option, operators that are aware of the issues and
# are willing to manually work around them can enable live migration support for
# these instances.
#
# Related options:
#
# * ``compute_driver``: Only the libvirt driver is affected.
# (boolean value)
#enable_numa_live_migration = false
[wsgi]
#
# Options under this group are used to configure WSGI (Web Server Gateway
# Interface). WSGI is used to serve API requests.
#
# From nova.conf
#
#
# This option represents a file name for the paste.deploy config for nova-api.
#
# Possible values:
#
# * A string representing file name for the paste.deploy config.
# (string value)
#api_paste_config = api-paste.ini
# DEPRECATED:
# It represents a python format string that is used as the template to generate
# log lines. The following values can be formatted into it: client_ip,
# date_time, request_line, status_code, body_length, wall_seconds.
#
# This option is used for building custom request loglines when running
# nova-api under eventlet. If used under uwsgi or apache, this option
# has no effect.
#
# Possible values:
#
# * '%(client_ip)s "%(request_line)s" status: %(status_code)s'
# 'len: %(body_length)s time: %(wall_seconds).7f' (default)
# * Any formatted string formed by specific values.
# (string value)
# This option is deprecated for removal since 16.0.0.
# Its value may be silently ignored in the future.
# Reason:
# This option only works when running nova-api under eventlet, and
# encodes very eventlet specific pieces of information. Starting in Pike
# the preferred model for running nova-api is under uwsgi or apache
# mod_wsgi.
#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
#
# This option specifies the HTTP header used to determine the protocol scheme
# for the original request, even if it was removed by an SSL terminating proxy.
#
# Possible values:
#
# * None (default) - the request scheme is not influenced by any HTTP headers
# * Valid HTTP header, like HTTP_X_FORWARDED_PROTO
#
# WARNING: Do not set this unless you know what you are doing.
#
# Make sure ALL of the following are true before setting this (assuming the
# values from the example above):
# * Your API is behind a proxy.
# * Your proxy strips the X-Forwarded-Proto header from all incoming requests.
#   In other words, if end users include that header in their requests, the
#   proxy will discard it.
# * Your proxy sets the X-Forwarded-Proto header and sends it to API, but only
# for requests that originally come in via HTTPS.
#
# If any of those are not true, you should keep this setting set to None.
#
# (string value)
#secure_proxy_ssl_header = <None>
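#
# Example (illustrative only): if, and only if, all of the conditions listed
# above hold for your proxy, the header from the example could be configured
# as:
#
#     [wsgi]
#     secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO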
#
# This option allows setting the path to the CA certificate file that should be
# used to verify connecting clients.
#
# Possible values:
#
# * String representing path to the CA certificate file.
#
# Related options:
#
# * enabled_ssl_apis
# (string value)
#ssl_ca_file = <None>
#
# This option allows setting the path to the SSL certificate of the API server.
#
# Possible values:
#
# * String representing path to the SSL certificate.
#
# Related options:
#
# * enabled_ssl_apis
# (string value)
#ssl_cert_file = <None>
#
# This option specifies the path to the file where the SSL private key of the
# API server is stored when SSL is in effect.
#
# Possible values:
#
# * String representing path to the SSL private key.
#
# Related options:
#
# * enabled_ssl_apis
# (string value)
#ssl_key_file = <None>
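#
# Example (illustrative only; the file paths are assumptions): enabling TLS
# for the API with the three options above might look like:
#
#     [wsgi]
#     ssl_ca_file = /etc/nova/ssl/ca.pem
#     ssl_cert_file = /etc/nova/ssl/nova-api.pem
#     ssl_key_file = /etc/nova/ssl/nova-api.key
#
# As the related options note, ``enabled_ssl_apis`` must also be set for SSL
# to take effect.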
#
# This option sets the value of TCP_KEEPIDLE in seconds for each server socket.
# It specifies the duration of time to keep the connection active. TCP generates
# a KEEPALIVE transmission for an application that requests to keep a connection
# active. Not supported on OS X.
#
# Related options:
#
# * keep_alive
# (integer value)
# Minimum value: 0
#tcp_keepidle = 600
#
# This option specifies the size of the pool of greenthreads used by wsgi.
# It is possible to limit the number of concurrent connections using this
# option.
# (integer value)
# Minimum value: 0
# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size
#default_pool_size = 1000
#
# This option specifies the maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large tokens (typically
# those generated by the Keystone v3 API with big service catalogs).
#
# Since TCP is a stream-based protocol, in order to reuse a connection, HTTP
# has to have a way to indicate the end of the previous response and the
# beginning of the next. Hence, in a keep_alive case, all messages must have a
# self-defined message length.
# (integer value)
# Minimum value: 0
#max_header_line = 16384
#
# This option allows using the same TCP connection to send and receive multiple
# HTTP requests/responses, as opposed to opening a new one for every single
# request/response pair. HTTP keep-alive indicates HTTP connection reuse.
#
# Possible values:
#
# * True : reuse HTTP connection.
# * False : closes the client socket connection explicitly.
#
# Related options:
#
# * tcp_keepidle
# (boolean value)
# Deprecated group/name - [DEFAULT]/wsgi_keep_alive
#keep_alive = true
#
# This option specifies the timeout for client connections' socket operations.
# If an incoming connection is idle for this number of seconds it will be
# closed. It indicates timeout on individual read/writes on the socket
# connection. To wait forever set to 0.
# (integer value)
# Minimum value: 0
#client_socket_timeout = 900
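#
# Example (illustrative tuning values, not recommendations): a deployment
# serving nova-api with the built-in eventlet-based WSGI server that wants a
# larger greenthread pool and a shorter idle-connection timeout could adjust
# the options above as:
#
#     [wsgi]
#     default_pool_size = 2000
#     client_socket_timeout = 600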
[xenserver]
#
# XenServer options are used when the compute_driver is set to use
# XenServer (compute_driver=xenapi.XenAPIDriver).
#
# Must specify connection_url, connection_password and ovs_integration_bridge to
# use compute_driver=xenapi.XenAPIDriver.
#
# From nova.conf
#
#
# Number of seconds to wait for agent's reply to a request.
#
# Nova configures/performs certain administrative actions on a server with the
# help of an agent that's installed on the server. The communication between
# Nova and the agent is achieved via sharing messages, called records, over
# xenstore, a shared storage across all the domains on a Xenserver host.
# Operations performed by the agent on behalf of nova are: 'version',
# 'key_init', 'password', 'resetnetwork', 'inject_file', and 'agentupdate'.
#
# To perform one of the above operations, the xapi 'agent' plugin writes the
# command and its associated parameters to a certain location known to the
# domain and awaits response. On being notified of the message, the agent
# performs appropriate actions on the server and writes the result back to
# xenstore. This result is then read by the xapi 'agent' plugin to determine
# the success/failure of the operation.
#
# This config option determines how long the xapi 'agent' plugin shall wait to
# read the response off of xenstore for a given request/command. If the agent on
# the instance fails to write the result in this time period, the operation is
# considered to have timed out.
#
# Related options:
#
# * ``agent_version_timeout``
# * ``agent_resetnetwork_timeout``
#
# (integer value)
# Minimum value: 0
#agent_timeout = 30
#
# Number of seconds to wait for agent's reply to version request.
#
# This indicates the amount of time xapi 'agent' plugin waits for the agent to
# respond to the 'version' request specifically. The generic timeout for agent
# communication ``agent_timeout`` is ignored in this case.
#
# During the build process the 'version' request is used to determine if the
# agent is available/operational to perform other requests such as
# 'resetnetwork', 'password', 'key_init' and 'inject_file'. If the 'version'
# call fails, the other configuration is skipped. So, this configuration option
# can also be interpreted as the time in which the agent is expected to be
# fully operational.
# (integer value)
# Minimum value: 0
#agent_version_timeout = 300
#
# Number of seconds to wait for agent's reply to resetnetwork
# request.
#
# This indicates the amount of time xapi 'agent' plugin waits for the agent to
# respond to the 'resetnetwork' request specifically. The generic timeout for
# agent communication ``agent_timeout`` is ignored in this case.
# (integer value)
# Minimum value: 0
#agent_resetnetwork_timeout = 60
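#
# Example (illustrative values only, not recommendations): guests whose agent
# is slow to come up could be given more headroom by raising the timeouts
# above, for instance:
#
#     [xenserver]
#     agent_timeout = 60
#     agent_version_timeout = 600
#     agent_resetnetwork_timeout = 120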
#
# Path to locate guest agent on the server.
#
# Specifies the path in which the XenAPI guest agent should be located. If the
# agent is present, network configuration is not injected into the image.
#
# Related options:
#
# For this option to have an effect:
# * ``flat_injected`` should be set to ``True``
# * ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
#
# (string value)
#agent_path = usr/sbin/xe-update-networking
#
# Disables the use of XenAPI agent.
#
# This configuration option controls whether the use of the agent should be
# enabled or not regardless of what image properties are present. Image
# properties have an effect only when this is set to ``False``. Read the
# description of config option ``use_agent_default`` for more information.
#
# Related options:
#
# * ``use_agent_default``
#
# (boolean value)
#disable_agent = false
#
# Whether or not to use the agent by default when its usage is enabled but not
# indicated by the image.
#
# The use of XenAPI agent can be disabled altogether using the configuration
# option ``disable_agent``. However, if it is not disabled, the use of an agent
# can still be controlled by the image in use through one of its properties,
# ``xenapi_use_agent``. If this property is either not present or specified
# incorrectly on the image, the use of agent is determined by this configuration
# option.
#
# Note that if this configuration is set to ``True`` when the agent is not
# present, the boot times will increase significantly.
#
# Related options:
#
# * ``disable_agent``
#
# (boolean value)
#use_agent_default = false
# Timeout in seconds for XenAPI login. (integer value)
# Minimum value: 0
#login_timeout = 10
#
# Maximum number of concurrent XenAPI connections.
#
# In nova, multiple XenAPI requests can happen at a time.
# Configuring this option will parallelize access to the XenAPI
# session, which allows you to make concurrent XenAPI connections.
# (integer value)
# Minimum value: 1
#connection_concurrent = 5
#
# Cache glance images locally.
#
# The value for this option must be chosen from the choices listed
# here. Configuring a value other than these will default to 'all'.
#
# Note: There is nothing that deletes these images.
#
# Possible values:
#
# * `all`: will cache all images.
# * `some`: will only cache images that have the
# image_property `cache_in_nova=True`.
# * `none`: turns off caching entirely.
# (string value)
#cache_images = all
#
# Compression level for images.
#
# By setting this option we can configure the gzip compression level.
# This option sets GZIP environment variable before spawning tar -cz
# to force the compression level. It defaults to none, which means the
# GZIP environment variable is not set and the default (usually -6)
# is used.
#
# Possible values:
#
# * Range is 1-9, e.g., 9 for gzip -9, 9 being most
# compressed but most CPU intensive on dom0.
# * Any values out of this range will default to None.
# (integer value)
# Minimum value: 1
# Maximum value: 9
#image_compression_level = <None>
# Default OS type used when uploading an image to glance (string value)
#default_os_type = linux
# Time in secs to wait for a block device to be created (integer value)
# Minimum value: 1
#block_device_creation_timeout = 10
#
# Maximum size in bytes of kernel or ramdisk images.
#
# Specifying the maximum size of kernel or ramdisk will avoid copying
# large files to dom0 and filling up /boot/guest.
# (integer value)
#max_kernel_ramdisk_size = 16777216
#
# Filter for finding the SR to be used to install guest instances on.
#
# Possible values:
#
# * To use the Local Storage in default XenServer/XCP installations
# set this flag to other-config:i18n-key=local-storage.
# * To select an SR with a different matching criteria, you could
# set it to other-config:my_favorite_sr=true.
# * To fall back on the Default SR, as displayed by XenCenter,
# set this flag to: default-sr:true.
# (string value)
#sr_matching_filter = default-sr:true
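#
# Example: to install guests on the Local Storage SR of a default
# XenServer/XCP installation, as described above, set:
#
#     [xenserver]
#     sr_matching_filter = other-config:i18n-key=local-storage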
#
# Whether to use sparse_copy for copying data on a resize down.
# (False will use standard dd). This speeds up resizes down
# considerably since large runs of zeros won't have to be rsynced.
# (boolean value)
#sparse_copy = true
#
# Maximum number of retries to unplug VBD.
# If set to 0, the unplug is attempted once, with no retries.
# (integer value)
# Minimum value: 0
#num_vbd_unplug_retries = 10
#
# Name of network to use for booting iPXE ISOs.
#
# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
# This feature gives a means to roll your own image.
#
# By default this option is not set. Enable this option to
# boot an iPXE ISO.
#
# Related Options:
#
# * `ipxe_boot_menu_url`
# * `ipxe_mkisofs_cmd`
# (string value)
#ipxe_network_name = <None>
#
# URL to the iPXE boot menu.
#
# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
# This feature gives a means to roll your own image.
#
# By default this option is not set. Enable this option to
# boot an iPXE ISO.
#
# Related Options:
#
# * `ipxe_network_name`
# * `ipxe_mkisofs_cmd`
# (string value)
#ipxe_boot_menu_url = <None>
#
# Name and optionally path of the tool used for ISO image creation.
#
# An iPXE ISO is a specially crafted ISO which supports iPXE booting.
# This feature gives a means to roll your own image.
#
# Note: By default `mkisofs` is not present in the Dom0, so the
# package can either be manually added to Dom0 or include the
# `mkisofs` binary in the image itself.
#
# Related Options:
#
# * `ipxe_network_name`
# * `ipxe_boot_menu_url`
# (string value)
#ipxe_mkisofs_cmd = mkisofs
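#
# Example (illustrative only; the network name and menu URL are assumptions):
# to enable booting iPXE ISOs, the three related options could be set together:
#
#     [xenserver]
#     ipxe_network_name = ipxe-boot
#     ipxe_boot_menu_url = http://boot.example.com/boot.ipxe
#     ipxe_mkisofs_cmd = mkisofs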
#
# URL for connection to XenServer/Xen Cloud Platform. A special value
# of unix://local can be used to connect to the local unix socket.
#
# Possible values:
#
# * Any string that represents a URL. The connection_url is
# generally the management network IP address of the XenServer.
# * This option must be set if you chose the XenServer driver.
# (string value)
#connection_url = <None>
# Username for connection to XenServer/Xen Cloud Platform (string value)
#connection_username = root
# Password for connection to XenServer/Xen Cloud Platform (string value)
#connection_password = <None>
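#
# Example (illustrative only; the URL, password and bridge name are
# assumptions): a minimal XenAPI driver setup, per the requirements noted at
# the top of this section and assuming compute_driver is set in [DEFAULT],
# could look like:
#
#     [DEFAULT]
#     compute_driver = xenapi.XenAPIDriver
#
#     [xenserver]
#     connection_url = http://xenserver1.example.com
#     connection_username = root
#     connection_password = secret
#     ovs_integration_bridge = xapi1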
#
# The interval used for polling of coalescing VHDs.
#
# This is the interval after which the task of coalescing VHDs is
# performed, until it reaches the maximum number of attempts that is set by
# vhd_coalesce_max_attempts.
#
# Related options:
#
# * `vhd_coalesce_max_attempts`
# (floating point value)
# Minimum value: 0
#vhd_coalesce_poll_interval = 5.0
#
# Ensure compute service is running on host XenAPI connects to.
# This option must be set to false if the 'independent_compute'
# option is set to true.
#
# Possible values:
#
# * Setting this option to true will make sure that compute service
# is running on the same host that is specified by connection_url.
# * Setting this option to false skips the check.
#
# Related options:
#
# * `independent_compute`
# (boolean value)
#check_host = true
#
# Max number of times to poll for VHD to coalesce.
#
# This option determines the maximum number of attempts that can be
# made for coalescing the VHD before giving up.
#
# Related options:
#
# * `vhd_coalesce_poll_interval`
# (integer value)
# Minimum value: 0
#vhd_coalesce_max_attempts = 20
# Base path to the storage repository on the XenServer host. (string value)
#sr_base_path = /var/run/sr-mount
#
# The iSCSI Target Host.
#
# This option represents the hostname or IP of the iSCSI Target. If the
# target host is not present in the connection information from the
# volume provider then the value from this option is taken.
# (host address value)
#target_host = 169.254.0.1
#
# The iSCSI Target Port.
#
# This option represents the port of the iSCSI Target. If the
# target port is not present in the connection information from the
# volume provider then the value from this option is taken.
# (port value)
# Minimum value: 0
# Maximum value: 65535
#target_port = 3260
#
# Used to prevent attempts to attach VBDs locally, so Nova can
# be run in a VM on a different host.
#
# Related options:
#
# * ``CONF.flat_injected`` (Must be False)
# * ``CONF.xenserver.check_host`` (Must be False)
# * ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
# * Joining host aggregates (will error if attempted)
# * Swap disks for Windows VMs (will error if attempted)
# * Nova-based auto_configure_disk (will error if attempted)
# (boolean value)
#independent_compute = false
#
# Wait time for instances to go to running state.
#
# Provide an integer value representing time in seconds to set the
# wait time for an instance to go to running state.
#
# When a request to create an instance is received by nova-api and
# communicated to nova-compute, the creation of the instance occurs
# through interaction with Xen via XenAPI in the compute node. Once
# the node on which the instance(s) are to be launched is decided by
# nova-scheduler and the launch is triggered, a certain amount of wait
# time is involved until the instance(s) can become available and
# 'running'. This wait time is defined by running_timeout. If the
# instances do not go to running state within this specified wait
# time, the launch expires and the instance(s) are set to 'error'
# state.
# (integer value)
# Minimum value: 0
#running_timeout = 60
# DEPRECATED:
# The XenAPI VIF driver using XenServer Network APIs.
#
# Provide a string value representing the VIF XenAPI vif driver to use for
# plugging virtual network interfaces.
#
# Xen configuration uses bridging within the backend domain to allow
# all VMs to appear on the network as individual hosts. Bridge
# interfaces are used to create a XenServer VLAN network in which
# the VIFs for the VM instances are plugged. If no VIF bridge driver
# is plugged, the bridge is not made available. This configuration
# option takes in a value for the VIF driver.
#
# Possible values:
#
# * nova.virt.xenapi.vif.XenAPIOpenVswitchDriver (default)
# * nova.virt.xenapi.vif.XenAPIBridgeDriver (deprecated)
#
# Related options:
#
# * ``vlan_interface``
# * ``ovs_integration_bridge``
# (string value)
# This option is deprecated for removal since 15.0.0.
# Its value may be silently ignored in the future.
# Reason:
# There are only two in-tree vif drivers for XenServer. XenAPIBridgeDriver is
# for nova-network which is deprecated and XenAPIOpenVswitchDriver is for
# Neutron which is the default configuration for Nova since the 15.0.0 Ocata
# release. In the future the "use_neutron" configuration option will be used
# to determine which vif driver to use.
#vif_driver = nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
#
# Dom0 plugin driver used to handle image uploads.
#
# Provide a string value representing a plugin driver required to
# handle the image uploading to GlanceStore.
#
# Images and snapshots from XenServer need to be uploaded to the data
# store for use. image_upload_handler takes in a value for the Dom0
# plugin driver. This driver is then called to upload images to the
# GlanceStore.
# (string value)
#image_upload_handler = nova.virt.xenapi.image.glance.GlanceStore
#
# Number of seconds to wait for SR to settle if the VDI
# does not exist when first introduced.
#
# Some SRs, particularly iSCSI connections, are slow to see the VDIs
# right after they are introduced. Setting this option to a
# time interval will make the SR wait for that time period
# before raising a VDI not found exception.
# (integer value)
# Minimum value: 0
#introduce_vdi_retry_wait = 20
#
# The name of the integration Bridge that is used with xenapi
# when connecting with Open vSwitch.
#
# Note: The value of this config option is dependent on the
# environment, therefore this configuration value must be set
# accordingly if you are using XenAPI.
#
# Possible values:
#
# * Any string that represents a bridge name.
# (string value)
#ovs_integration_bridge = <None>
#
# When adding a new host to a pool, this will append a --force flag to the
# command, forcing hosts to join the pool even if they have different CPUs.
#
# Since XenServer version 5.6 it is possible to create a pool of hosts that
# have different CPU capabilities. To accommodate CPU differences, XenServer
# limited the features it uses to determine CPU compatibility to only the ones
# that are exposed by the CPU, and support for CPU masking was added.
# Despite this effort to level differences between CPUs, it is still possible
# that adding a new host will fail, thus the option to force the join was
# introduced.
# (boolean value)
#use_join_force = true
#
# Publicly visible name for this console host.
#
# Possible values:
#
# * Current hostname (default) or any string representing hostname.
# (string value)
#console_public_hostname = <current_hostname>
[xvp]
#
# Configuration options for XVP.
#
# xvp (Xen VNC Proxy) is a proxy server providing password-protected VNC-based
# access to the consoles of virtual machines hosted on Citrix XenServer.
#
# From nova.conf
#
# XVP conf template (string value)
#console_xvp_conf_template = $pybasedir/nova/console/xvp.conf.template
# Generated XVP conf file (string value)
#console_xvp_conf = /etc/xvp.conf
# XVP master process pid file (string value)
#console_xvp_pid = /var/run/xvp.pid
# XVP log file (string value)
#console_xvp_log = /var/log/xvp.log
# Port for XVP to multiplex VNC connections on (port value)
# Minimum value: 0
# Maximum value: 65535
#console_xvp_multiplex_port = 5900