| tags | openstack, nova, cloud, ansible |
|---|---|
| category | *nix |
# Enable/Disable ceilometer configurations
nova_ceilometer_enabled: False
## Verbosity Options
debug: False
# Set the package install state for distribution packages
# Options are 'present' and 'latest'
nova_package_state: "latest"
nova_git_repo: https://git.openstack.org/openstack/nova
nova_git_install_branch: stable/newton
nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
nova_lxd_git_install_branch: stable/newton
nova_developer_mode: false
nova_developer_constraints:
- "git+{{ nova_git_repo }}@{{ nova_git_install_branch }}#egg=nova"
- "git+{{ nova_lxd_git_repo }}@{{ nova_lxd_git_install_branch }}#egg=nova-lxd"
# Name of the virtual env to deploy into
nova_venv_tag: untagged
nova_bin: "/openstack/venvs/nova-{{ nova_venv_tag }}/bin"
nova_venv_download_url: http://127.0.0.1/venvs/untagged/ubuntu/nova.tgz
nova_fatal_deprecations: False
## Nova user information
nova_system_user_name: nova
nova_system_group_name: nova
nova_system_shell: /bin/bash
nova_system_comment: nova system user
nova_system_home_folder: "/var/lib/{{ nova_system_user_name }}"
nova_libvirt_save_path: "{{ nova_system_home_folder }}/save"
nova_lock_path: "/var/lock/nova"
## Manually specified nova UID/GID
# Deployers can specify a UID for the nova user as well as the GID for the
# nova group if needed. This is commonly used in environments where shared
# storage is used, such as NFS or GlusterFS, and nova UID/GID values must be
# in sync between multiple servers.
#
# WARNING: Changing these values on an existing deployment can lead to
# failures, errors, and instability.
#
# nova_system_user_uid: <UID>
# nova_system_group_gid: <GID>
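# Example (hypothetical values): pinning the UID/GID in
# /etc/openstack_deploy/user_variables.yml so that all hosts sharing
# instance storage agree on ownership:
# nova_system_user_uid: 999
# nova_system_group_gid: 999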
## DB
nova_galera_user: nova
nova_galera_database: nova
nova_db_max_overflow: 10
nova_db_max_pool_size: 120
nova_db_pool_timeout: 30
## DB API
nova_api_galera_user: nova_api
nova_api_galera_database: nova_api
nova_api_db_max_overflow: 10
nova_api_db_max_pool_size: 120
nova_api_db_pool_timeout: 30
## RabbitMQ info
## Configuration for RPC communications
nova_rpc_backend: rabbit
nova_rpc_thread_pool_size: 64
nova_rpc_conn_pool_size: 30
nova_rpc_response_timeout: 60
nova_rabbitmq_servers: 127.0.0.1
nova_rabbitmq_port: 5672
nova_rabbitmq_userid: nova
nova_rabbitmq_vhost: /nova
nova_rabbitmq_use_ssl: False
## Configuration for notifications communication, i.e. [oslo_messaging_notifications]
nova_rabbitmq_telemetry_userid: "{{ nova_rabbitmq_userid }}"
nova_rabbitmq_telemetry_password: "{{ nova_rabbitmq_password }}"
nova_rabbitmq_telemetry_vhost: "{{ nova_rabbitmq_vhost }}"
nova_rabbitmq_telemetry_port: "{{ nova_rabbitmq_port }}"
nova_rabbitmq_telemetry_servers: "{{ nova_rabbitmq_servers }}"
nova_rabbitmq_telemetry_use_ssl: "{{ nova_rabbitmq_use_ssl }}"
## Nova virtualization Types
# The nova_virt_types dictionary contains global overrides used for
# specific compute types. Every variable inside of this dictionary
# will become an ansible fact. This gives the user the option to set
# or customize things based on their needs without having to redefine
# this entire data structure. Every supported compute type will have
# its specific variable requirements set under its short name; see the
# commented override example after the dictionary below.
nova_virt_types:
  ironic:
    nova_compute_driver: ironic.IronicDriver
    nova_scheduler_host_manager: ironic_host_manager
    nova_reserved_host_memory_mb: 0
    nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
    nova_scheduler_use_baremetal_filters: True
    nova_scheduler_tracks_instance_changes: False
  kvm:
    nova_compute_driver: libvirt.LibvirtDriver
    nova_scheduler_host_manager: host_manager
    nova_reserved_host_memory_mb: 2048
    nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
    nova_scheduler_use_baremetal_filters: False
    nova_scheduler_tracks_instance_changes: True
  lxd:
    nova_compute_driver: lxd.LXDDriver
    nova_scheduler_host_manager: host_manager
    nova_reserved_host_memory_mb: 2048
    nova_compute_manager: nova.compute.manager.ComputeManager
    nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
    nova_scheduler_use_baremetal_filters: False
    nova_scheduler_tracks_instance_changes: True
  qemu:
    nova_compute_driver: libvirt.LibvirtDriver
    nova_scheduler_host_manager: host_manager
    nova_reserved_host_memory_mb: 2048
    nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
    nova_scheduler_use_baremetal_filters: False
    nova_scheduler_tracks_instance_changes: True
  powervm:
    nova_compute_driver: powervm.driver.PowerVMDriver
    nova_scheduler_host_manager: host_manager
    nova_reserved_host_memory_mb: 8192
    nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
    nova_scheduler_use_baremetal_filters: False
    nova_scheduler_tracks_instance_changes: True
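# Example (hypothetical value): because every entry in nova_virt_types above
# becomes an Ansible fact, a single value can be overridden from
# user_variables.yml without redefining the whole dictionary, e.g.:
# nova_reserved_host_memory_mb: 4096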
# If this is not set, then the playbook will try to guess it.
#nova_virt_type: kvm
# If set, nova_virt_type must be one of these:
nova_supported_virt_types:
- qemu
- kvm
- lxd
- ironic
- powervm
## Nova Auth
nova_service_region: RegionOne
nova_service_project_name: "service"
nova_service_project_domain_id: default
nova_service_user_domain_id: default
nova_service_user_name: "nova"
nova_service_role_name: "admin"
## Keystone authentication middleware
nova_keystone_auth_plugin: password
## Nova enabled apis
nova_enabled_apis: "osapi_compute,metadata"
## Nova v2.1
nova_service_name: nova
nova_service_type: compute
nova_service_proto: http
nova_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(nova_service_proto) }}"
nova_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(nova_service_proto) }}"
nova_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(nova_service_proto) }}"
nova_service_port: 8774
nova_service_description: "Nova Compute Service"
nova_service_publicuri: "{{ nova_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ nova_service_port }}"
nova_service_publicurl: "{{ nova_service_publicuri }}/v2.1/%(tenant_id)s"
nova_service_adminuri: "{{ nova_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_adminurl: "{{ nova_service_adminuri }}/v2.1/%(tenant_id)s"
nova_service_internaluri: "{{ nova_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_internalurl: "{{ nova_service_internaluri }}/v2.1/%(tenant_id)s"
## Nova cinder
nova_cross_az_attach: True
## Nova spice
nova_spice_html5proxy_base_proto: "{{ openstack_service_publicuri_proto | default('http') }}"
nova_spice_html5proxy_base_port: 6082
nova_spice_html5proxy_base_uri: "{{ nova_spice_html5proxy_base_proto }}://{{ external_lb_vip_address }}:{{ nova_spice_html5proxy_base_port }}"
nova_spice_html5proxy_base_url: "{{ nova_spice_html5proxy_base_uri }}/spice_auto.html"
nova_spice_console_keymap: en-us
nova_spice_console_agent_enabled: True
nova_spicehtml5_git_repo: https://github.com/SPICE/spice-html5
nova_spicehtml5_git_install_branch: master
## Nova novnc
nova_novncproxy_proto: "{{ openstack_service_publicuri_proto | default('http') }}"
nova_novncproxy_port: 6080
nova_novncproxy_base_uri: "{{ nova_novncproxy_proto }}://{{ external_lb_vip_address }}:{{ nova_novncproxy_port }}"
nova_novncproxy_base_url: "{{ nova_novncproxy_base_uri }}/vnc_auto.html"
nova_novncproxy_vncserver_proxyclient_address: "{{ ansible_host }}"
nova_novncproxy_vncserver_listen: "{{ ansible_host }}"
nova_novncproxy_agent_enabled: True
nova_novncproxy_git_repo: https://github.com/kanaka/novnc
nova_novncproxy_git_install_branch: master
nova_novncproxy_vnc_keymap: en-us
## Nova metadata
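# Note: nova_metadata_proxy_enabled below is resolved from the
# nova_network_services map further down, keyed by nova_network_type
# (linuxbridge by default).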
nova_metadata_proxy_enabled: "{{ nova_network_services[nova_network_type]['metadata_proxy_enabled'] | bool }}"
nova_metadata_host: "{{ internal_lb_vip_address }}"
nova_metadata_port: 8775
## Nova compute
nova_enable_instance_password: True
nova_force_config_drive: False
## Nova libvirt
# Warning: If nova_libvirt_inject_key or nova_libvirt_inject_password are enabled for Ubuntu compute hosts
# then the kernel will be made readable by the nova user (a requirement for libguestfs).
# See Launchpad bugs 1507915 (Nova), 759725 (Ubuntu), and http://libguestfs.org/guestfs-faq.1.html
nova_libvirt_inject_key: False
# inject partition options:
# -2 => disable, -1 => inspect (libguestfs only), 0 => not partitioned, >0 => partition number
nova_libvirt_inject_partition: -2
nova_libvirt_inject_password: False
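# The two settings below key off whether nova_libvirt_images_rbd_pool is
# defined: with a Ceph rbd pool they resolve to "network=writeback" and
# "unmap"; otherwise they fall back to an empty cachemode and "ignore".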
nova_libvirt_disk_cachemodes: '{{ nova_libvirt_images_rbd_pool is defined | ternary("network=writeback", "") }}'
nova_libvirt_hw_disk_discard: '{{ nova_libvirt_images_rbd_pool is defined | ternary("unmap", "ignore") }}'
## Nova console
nova_console_agent_enabled: True
nova_console_keymap: en-us
# Set the console type. Presently the only options are ["spice", "novnc"].
nova_console_type: spice
# Nova console ssl info, presently only used by novnc console type
nova_console_ssl_dir: "/etc/nova/ssl"
nova_console_ssl_cert: "{{ nova_console_ssl_dir }}/nova-console.pem"
nova_console_ssl_key: "{{ nova_console_ssl_dir }}/nova-console.key"
# Set to true when terminating SSL/TLS at a load balancer
nova_external_ssl: false
# External SSL forwarding proto
nova_secure_proxy_ssl_header: HTTP_X_FORWARDED_PROTO
## Nova global config
nova_cpu_mode: host-model
nova_linuxnet_interface_driver: nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
nova_max_age: 0
nova_remove_unused_resized_minimum_age_seconds: 3600
nova_image_cache_manager_interval: 0
nova_resume_guests_state_on_host_boot: False
nova_dhcp_domain: novalocal
nova_default_schedule_zone: nova
# Comma separated list of Glance API servers
# nova_glance_api_servers:
nova_network_type: linuxbridge
nova_network_services:
  linuxbridge:
    use_forwarded_for: False
    metadata_proxy_enabled: True
  nuage:
    use_forwarded_for: True
    metadata_proxy_enabled: True
    ovs_bridge: alubr0
  calico:
    use_forwarded_for: True
    metadata_proxy_enabled: False
# Nova quota
nova_quota_cores: 20
nova_quota_fixed_ips: -1
nova_quota_floating_ips: 10
nova_quota_injected_file_content_bytes: 10240
nova_quota_injected_file_path_length: 255
nova_quota_injected_files: 5
nova_quota_instances: 10
nova_quota_key_pairs: 100
nova_quota_metadata_items: 128
nova_quota_networks: 3
nova_quota_ram: 51200
nova_quota_security_group_rules: 20
nova_quota_security_groups: 10
nova_quota_server_group_members: 10
nova_quota_server_groups: 10
# Nova Scheduler
nova_cpu_allocation_ratio: 2.0
nova_disk_allocation_ratio: 1.0
nova_max_instances_per_host: 50
nova_max_io_ops_per_host: 10
nova_ram_allocation_ratio: 1.0
nova_ram_weight_multiplier: 5.0
nova_reserved_host_disk_mb: 2048
nova_scheduler_driver_task_period: 60
nova_scheduler_host_subset_size: 10
nova_scheduler_max_attempts: 5
nova_scheduler_default_filters: "RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,AggregateCoreFilter,AggregateDiskFilter"
nova_scheduler_driver: filter_scheduler
nova_scheduler_available_filters: nova.scheduler.filters.all_filters
nova_scheduler_weight_classes: nova.scheduler.weights.all_weighers
# If you want to regenerate the nova user's SSH keys on each run, set this var to True.
# Otherwise keys will be generated on the first run and not regenerated each run.
nova_recreate_keys: False
# Nova Ceph rbd
# Enable and define nova_libvirt_images_rbd_pool to use rbd as the nova backend
#nova_libvirt_images_rbd_pool: vms
nova_ceph_client: '{{ cinder_ceph_client }}'
# TODO(odyssey4me) - the uuid should be removed, there should be no defaults for secrets
nova_ceph_client_uuid: 517a4663-3927-44bc-9ea7-4a90e1cd4c66
## General Nova configuration
# If ``nova_osapi_compute_workers`` is unset the system will use half the number of available VCPUS to
# compute the number of api workers to use.
# nova_osapi_compute_workers: 16
# If ``nova_conductor_workers`` is unset the system will use half the number of available VCPUS to
# compute the number of conductor workers to use.
# nova_conductor_workers: 16
# If ``nova_metadata_workers`` is unset the system will use half the number of available VCPUS to
# compute the number of metadata workers to use.
# nova_metadata_workers: 16
## Policy vars
# Provide a list of access controls to update the default policy.json with. These changes will be merged
# with the access controls in the default policy.json. E.g.
#nova_policy_overrides:
# "compute:create": ""
# "compute:create:attach_network": ""
nova_service_in_ldap: false
## libvirtd config options
nova_libvirtd_listen_tls: 1
nova_libvirtd_listen_tcp: 0
nova_libvirtd_auth_tcp: sasl
nova_libvirtd_debug_log_filters: "3:remote 4:event 3:json 3:rpc"
nova_api_metadata_init_overrides: {}
nova_api_os_compute_init_overrides: {}
nova_cert_init_overrides: {}
nova_compute_init_overrides: {}
nova_conductor_init_overrides: {}
nova_consoleauth_init_overrides: {}
nova_novncproxy_init_overrides: {}
nova_scheduler_init_overrides: {}
nova_spicehtml5proxy_init_overrides: {}
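# Example (illustrative section/key names): the *_init_overrides dictionaries
# above are applied to the corresponding service's init configuration via the
# services map below; a hypothetical systemd-style override could look like:
# nova_compute_init_overrides:
#   Service:
#     LimitNOFILE: "4096"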
## Service Name-Group Mapping
nova_services:
  nova-api-metadata:
    group: nova_api_metadata
    service_name: nova-api-metadata
    init_config_overrides: "{{ nova_api_metadata_init_overrides }}"
    start_order: 4
  nova-api-os-compute:
    group: nova_api_os_compute
    service_name: nova-api-os-compute
    init_config_overrides: "{{ nova_api_os_compute_init_overrides }}"
    start_order: 3
  nova-cert:
    group: nova_cert
    service_name: nova-cert
    init_config_overrides: "{{ nova_cert_init_overrides }}"
    start_order: 2
  nova-compute:
    group: nova_compute
    service_name: nova-compute
    init_config_overrides: "{{ nova_compute_init_overrides }}"
    start_order: 5
  nova-conductor:
    group: nova_conductor
    service_name: nova-conductor
    init_config_overrides: "{{ nova_conductor_init_overrides }}"
    start_order: 1
  nova-consoleauth:
    group: nova_console
    service_name: nova-consoleauth
    init_config_overrides: "{{ nova_consoleauth_init_overrides }}"
    start_order: 2
  nova-novncproxy:
    group: nova_console
    service_name: nova-novncproxy
    init_config_overrides: "{{ nova_novncproxy_init_overrides }}"
    condition: "{{ nova_console_type == 'novnc' }}"
    start_order: 4
  nova-scheduler:
    group: nova_scheduler
    service_name: nova-scheduler
    init_config_overrides: "{{ nova_scheduler_init_overrides }}"
    start_order: 2
  nova-spicehtml5proxy:
    group: nova_console
    service_name: nova-spicehtml5proxy
    init_config_overrides: "{{ nova_spicehtml5proxy_init_overrides }}"
    condition: "{{ nova_console_type == 'spice' }}"
    start_order: 4
nova_novnc_pip_packages:
- websockify
# nova packages that must be installed before anything else
nova_requires_pip_packages:
- virtualenv
- virtualenv-tools
- python-keystoneclient # Keystoneclient is needed by the OSA keystone lib
- httplib2
nova_compute_pip_packages:
- libvirt-python
nova_compute_ironic_pip_packages:
- python-ironicclient
# Common pip packages
nova_pip_packages:
- PyMySQL
- python-memcached
- pycrypto
- python-keystoneclient
- python-novaclient
- keystonemiddleware
- nova
nova_compute_lxd_pip_packages:
- pylxd
- nova-lxd
nova_qemu_user: libvirt-qemu
nova_qemu_group: kvm
# Define the following variable to drop a replacement
# file for /etc/libvirt/qemu.conf
#qemu_conf_dict: {}
# This variable is used by the repo_build process to determine
# which host group to check for members of before building the
# pip packages required by this role. The value is picked up
# by the py_pkgs lookup.
nova_role_project_group: nova_all
## Tunable overrides
nova_nova_conf_overrides: {}
nova_rootwrap_conf_overrides: {}
nova_api_paste_ini_overrides: {}
nova_policy_overrides: {}
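# Example (illustrative option/value): each of the overrides dictionaries above
# is merged into the matching configuration file by section, so arbitrary
# options can be set from user_variables.yml, e.g.:
# nova_nova_conf_overrides:
#   DEFAULT:
#     remove_unused_original_minimum_age_seconds: 43200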
nova_compute_powervm_pip_packages:
- nova-powervm
- pyasn1-modules
libvirt_default_file: libvirt-bin
libvirt_group: libvirtd
lxd_bind_address: 0.0.0.0
lxd_bind_port: 8443
lxd_storage_backend: dir
# This needs to be set in the user_secrets.yml file.
#lxd_trust_password:
# This variable should be used with lxd when using a
# storage backend that utilizes storage pools
#lxd_storage_pool:
# PCI devices passthrough to nova
# For SR-IOV please use:
# nova_pci_passthrough_whitelist: '{ "physical_network": "<ml2 network name>", "devname": "<physical nic>" }'
# Example:
# nova_pci_passthrough_whitelist: '{ "physical_network": "physnet1", "devname": "p1p1" }'
nova_pci_passthrough_whitelist: {}
Example playbook for deploying this role:

.. code-block:: yaml

    - name: Installation and setup of Nova
      hosts: nova_all
      user: root
      roles:
        - { role: "os_nova", tags: [ "os-nova" ] }
      vars:
        nova_galera_address: "{{ internal_lb_vip_address }}"
This role supports two tags: nova-install and nova-config. The nova-install tag can be used to install and upgrade, and the nova-config tag can be used to manage configuration.
This role supports multiple CPU architecture types. At least one repo_build node must exist for each CPU type that is in use in the deployment.
At this time, ppc64le is supported only for the compute node type; it cannot be used for the OpenStack-Ansible management nodes.
This role supports multiple nova compute driver types; the supported drivers are those listed in nova_supported_virt_types above (qemu, kvm, lxd, ironic, and powervm). For most of these platforms the driver type is detected automatically by the OpenStack-Ansible nova role, and any mix of compute node types can be used, except for ironic. If using the lxd driver, the compute type must be specified explicitly using the nova_virt_type variable.
The nova_virt_type may be set in /etc/openstack_deploy/user_variables.yml, for example:

.. code-block:: yaml

    nova_virt_type: lxd
You can set nova_virt_type per host by using host_vars in /etc/openstack_deploy/openstack_user_config.yml. For example:

.. code-block:: yaml

    compute_hosts:
      aio1:
        ip: 172.29.236.100
        host_vars:
          nova_virt_type: lxd
If nova_virt_type is set in /etc/openstack_deploy/user_variables.yml, all nodes in the deployment are set to that hypervisor type. When it is set in both /etc/openstack_deploy/user_variables.yml and /etc/openstack_deploy/openstack_user_config.yml, the value from user_variables.yml always wins and is applied to all hosts.
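As a minimal illustration (reusing the hypothetical host from the example above): with the two snippets below, every host ends up with nova_virt_type set to kvm, because the value in user_variables.yml takes precedence over the per-host host_vars entry.

.. code-block:: yaml

    # /etc/openstack_deploy/user_variables.yml
    nova_virt_type: kvm

    # /etc/openstack_deploy/openstack_user_config.yml
    compute_hosts:
      aio1:
        ip: 172.29.236.100
        host_vars:
          nova_virt_type: lxd   # ignored; the user_variables.yml value applies to all hosts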