Commit 90f65bef authored by Steve Munene

Configs for the first Playbook run

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Overview
# ========
#
# This file contains the configuration for OpenStack Ansible Deployment
# (OSA) core services. Optional service configuration resides in the
# conf.d directory.
#
# You can customize the options in this file and copy it to
# /etc/openstack_deploy/openstack_user_config.yml or create a new
# file containing only necessary options for your environment
# before deployment.
#
# OSA implements PyYAML to parse YAML files and therefore supports structure
# and formatting options that augment traditional YAML. For example, aliases
# or references. For more information on PyYAML, see the documentation at
#
# http://pyyaml.org/wiki/PyYAMLDocumentation
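#
# Example:
#
# Define a block once with a YAML anchor and reuse it elsewhere with an
# alias, as this file does below with '&cidr_networks' and '*cidr_networks'
# (a minimal illustrative sketch; the key names here are arbitrary):
#
# base_networks: &my_networks
#   container: 172.29.236.0/22
# reused_networks: *my_networks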
#
# Configuration reference
# =======================
#
# Level: cidr_networks (required)
# Contains an arbitrary list of networks for the deployment. For each network,
# the inventory generator uses the IP address range to create a pool of IP
# addresses for network interfaces inside containers. A deployment requires
# at least one network for management.
#
# Option: <value> (required, string)
# Name of network and IP address range in CIDR notation. This IP address
# range coincides with the IP address range of the bridge for this network
# on the target host.
#
# Example:
#
# Define networks for a typical deployment.
#
# - Management network on 172.29.236.0/22. Control plane for infrastructure
# services, OpenStack APIs, and horizon.
# - Tunnel network on 172.29.240.0/22. Data plane for project (tenant) VXLAN
# networks.
# - Storage network on 172.29.244.0/22. Data plane for storage services such
# as cinder and swift.
#
cidr_networks: &cidr_networks
  container: 172.20.16.0/24
  tunnel: 172.21.16.0/24
  storage: 172.16.32.0/24
#
# Example:
#
# Define additional service network on 172.29.248.0/22 for deployment in a
# Rackspace data center.
#
# snet: 172.29.248.0/22
#
# --------
#
# Level: used_ips (optional)
# For each network in the 'cidr_networks' level, specify a list of IP addresses
# or a range of IP addresses that the inventory generator should exclude from
# the pools of IP addresses for network interfaces inside containers. To use a
# range, specify the lower and upper IP addresses (inclusive) with a comma
# separator.
#
# Example:
#
# The management network includes a router (gateway) on 172.29.236.1 and
# DNS servers on 172.29.236.11-12. The deployment includes seven target
# servers on 172.29.236.101-103, 172.29.236.111, 172.29.236.121, and
# 172.29.236.131. However, the inventory generator automatically excludes
# these IP addresses. The deployment host itself isn't automatically
# excluded. Network policy at this particular example organization
# also reserves 231-254 in the last octet at the high end of the range for
# network device management.
#
used_ips:
- "172.20.16.1,172.20.16.51"
- "172.20.16.200,172.20.16.255"
- "172.16.32.1,172.16.32.50"
- 172.16.32.134
# --------
global_overrides:
  cidr_networks: *cidr_networks
  internal_lb_vip_address: 172.20.16.51
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "ens9f0"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "ens9f0"
        container_mtu: "9000"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "ens9f0"
        container_mtu: "9000"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "400:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
# Level: global_overrides (required)
# Contains global options that require customization for a deployment. For
# example, load balancer virtual IP addresses (VIP). This level also provides
# a mechanism to override other options defined in the playbook structure.
#
# Option: internal_lb_vip_address (required, string)
# Load balancer VIP for the following items:
#
# - Local package repository
# - Galera SQL database cluster
# - Administrative and internal API endpoints for all OpenStack services
# - Glance registry
# - Nova compute source of images
# - Cinder source of images
# - Instance metadata
#
# Option: external_lb_vip_address (required, string)
# Load balancer VIP for the following items:
#
# - Public API endpoints for all OpenStack services
# - Horizon
#
# Option: management_bridge (required, string)
# Name of management network bridge on target hosts. Typically 'br-mgmt'.
#
# Level: provider_networks (required)
# List of container and bare metal networks on target hosts.
#
# Level: network (required)
# Defines a container or bare metal network. Create a level for each
# network.
#
# Option: type (required, string)
# Type of network. Networks other than those for neutron such as
# management and storage typically use 'raw'. Neutron networks use
# 'flat', 'vlan', or 'vxlan'. Coincides with ML2 plug-in configuration
# options.
#
# Option: container_bridge (required, string)
# Name of unique bridge on target hosts to use for this network. Typical
# values include 'br-mgmt', 'br-storage', 'br-vlan', 'br-vxlan', etc.
#
# Option: container_interface (required, string)
# Name of unique interface in containers to use for this network.
# Typical values include 'eth1', 'eth2', etc. This option is OPTIONAL
# for Neutron provider network definitions when Neutron agents are
# deployed on bare metal (default), but REQUIRED when agents are
# deployed in containers and for all other non-Neutron use-cases.
# NOTE: Container interface is different from host interfaces.
#
# Option: container_type (required, string)
# Name of mechanism that connects interfaces in containers to the bridge
# on target hosts for this network. Typically 'veth'.
#
# Option: host_bind_override (optional, string)
# Name of the physical network interface on the same L2 network being
# used with the br-vlan device. This host_bind_override should only
# be set for the ' container_bridge: "br-vlan" '.
# This interface is optional but highly recommended for vlan based
# OpenStack networking.
# If no additional network interface is available, a deployer can create
# a veth pair, and plug it into the br-vlan bridge to provide
# this interface. An example could be found in the aio_interfaces.cfg
# file.
#
# Option: container_mtu (optional, string)
# Sets the MTU within LXC for a given network type.
#
# Option: ip_from_q (optional, string)
# Name of network in 'cidr_networks' level to use for IP address pool. Only
# valid for 'raw' and 'vxlan' types.
#
# Option: address_prefix (optional, string)
# Override for the prefix of the key added to each host that contains IP
# address information for this network. By default, this will be the name
# given in 'ip_from_q' with a fallback of the name of the interface given in
# 'container_interface'.
# (e.g., 'ip_from_q'_address and 'container_interface'_address)
#
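# Example (hypothetical sketch; the 'san' prefix, interface, and group are
# illustrative assumptions, not part of this deployment):
#
# - network:
#     container_bridge: "br-storage"
#     container_type: "veth"
#     container_interface: "eth2"
#     ip_from_q: "storage"
#     type: "raw"
#     address_prefix: "san"
#     group_binds:
#       - cinder_volume
#
# Hosts bound to this network would then carry a 'san_address' key instead
# of the default 'storage_address'.
#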
# Option: is_container_address (required, boolean)
# If true, the load balancer uses this IP address to access services
# in the container. Only valid for networks with 'ip_from_q' option.
#
# Option: group_binds (required, string)
# List of one or more Ansible groups that contain this
# network. For more information, see the env.d YAML files.
#
# Option: reference_group (optional, string)
# An Ansible group that a host must be a member of, in addition to any of the
# groups within 'group_binds', for this network to apply.
#
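# Example (hypothetical sketch; the 'pod1_hosts' group is an illustrative
# assumption, not a group defined in this deployment):
#
# - network:
#     container_bridge: "br-storage"
#     container_type: "veth"
#     container_interface: "eth2"
#     ip_from_q: "storage"
#     type: "raw"
#     group_binds:
#       - cinder_volume
#     reference_group: "pod1_hosts"
#
# The network is then only applied to cinder_volume hosts that are also
# members of the 'pod1_hosts' group.
#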
# Option: net_name (optional, string)
# Name of network for 'flat' or 'vlan' types. Only valid for these
# types. Coincides with ML2 plug-in configuration options.
#
# Option: range (optional, string)
# For 'vxlan' type neutron networks, range of VXLAN network identifiers
# (VNI). For 'vlan' type neutron networks, range of VLAN tags. Supports
# more than one range of VLANs on a particular network. Coincides with
# ML2 plug-in configuration options.
#
# Option: static_routes (optional, list)
# List of additional routes to give to the container interface.
# Each item is composed of cidr and gateway. The items will be
# translated into the container network interfaces configuration
# as a `post-up ip route add <cidr> via <gateway> || true`.
#
# Option: gateway (optional, string)
# String containing the IP of the default gateway used by the
# container. Generally not needed: the containers will have
# their default gateway set with dnsmasq, pointing to the host,
# which performs NAT for container connectivity.
#
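# Example (hypothetical sketch; the route CIDR and gateway address are
# illustrative assumptions, not part of this deployment):
#
# - network:
#     container_bridge: "br-storage"
#     container_type: "veth"
#     container_interface: "eth2"
#     ip_from_q: "storage"
#     type: "raw"
#     group_binds:
#       - cinder_volume
#     static_routes:
#       - cidr: 10.176.0.0/12
#         gateway: 172.29.244.1
#
# The optional 'gateway' key would be set at the same level as
# 'static_routes' if a non-default container gateway were required.
#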
# Example:
#
# Define a typical network architecture:
#
# - Network of type 'raw' that uses the 'br-mgmt' bridge and 'management'
# IP address pool. Maps to the 'eth1' device in containers. Applies to all
# containers and bare metal hosts. Both the load balancer and Ansible
# use this network to access containers and services.
# - Network of type 'raw' that uses the 'br-storage' bridge and 'storage'
# IP address pool. Maps to the 'eth2' device in containers. Applies to
# nova compute and all storage service containers. Optionally applies
# to the swift proxy service.
# - Network of type 'vxlan' that contains neutron VXLAN tenant networks
# 1 to 1000 and uses 'br-vxlan' bridge on target hosts. Maps to the
# 'eth10' device in containers. Applies to all neutron agent containers
# and neutron agents on bare metal hosts.
# - Network of type 'vlan' that contains neutron VLAN networks 101 to 200
# and 301 to 400 and uses the 'br-vlan' bridge on target hosts. Maps to
# the 'eth11' device in containers. Applies to all neutron agent
# containers and neutron agents on bare metal hosts.
# - Network of type 'flat' that contains one neutron flat network and uses
# the 'br-vlan' bridge on target hosts. Maps to the 'eth12' device in
# containers. Applies to all neutron agent containers and neutron agents
# on bare metal hosts.
#
# Note: A pair of 'vlan' and 'flat' networks can use the same bridge because
# one only handles tagged frames and the other only handles untagged frames
# (the native VLAN in some parlance). However, additional 'vlan' or 'flat'
# networks require additional bridges.
#
# provider_networks:
# - network:
# group_binds:
# - all_containers
# - hosts
# type: "raw"
# container_bridge: "br-mgmt"
# container_interface: "eth1"
# container_type: "veth"
# ip_from_q: "container"
# is_container_address: true
# - network:
# group_binds:
# - glance_api
# - cinder_api
# - cinder_volume
# - nova_compute
# # Uncomment the next line if using swift with a storage network.
# # - swift_proxy
# type: "raw"
# container_bridge: "br-storage"
# container_type: "veth"
# container_interface: "eth2"
# container_mtu: "9000"
# ip_from_q: "storage"
# - network:
# group_binds:
# - neutron_linuxbridge_agent
# container_bridge: "br-vxlan"
# container_type: "veth"
# container_interface: "eth10"
# container_mtu: "9000"
# ip_from_q: "tunnel"
# type: "vxlan"
# range: "1:1000"
# net_name: "vxlan"
# - network:
# group_binds:
# - neutron_linuxbridge_agent
# container_bridge: "br-vlan"
# container_type: "veth"
# container_interface: "eth11"
# type: "vlan"
# range: "101:200,301:400"
# net_name: "vlan"
# - network:
# group_binds:
# - neutron_linuxbridge_agent
# container_bridge: "br-vlan"
# container_type: "veth"
# container_interface: "eth12"
# host_bind_override: "eth12"
# type: "flat"
# net_name: "flat"
#
# --------
#
# Level: shared-infra_hosts (required)
# List of target hosts on which to deploy shared infrastructure services
# including the Galera SQL database cluster, RabbitMQ, and Memcached. Recommend
# three minimum target hosts for these services.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four shared infrastructure hosts:
#
shared-infra_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
#
# List of target hosts on which to deploy shared infrastructure services
# and define the container_tech for a specific infra node. If this setting
# is omitted, the inventory generation system will default to "lxc". Acceptable
# options are "lxc" and "nspawn".
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Level: container_vars (required)
# Contains storage options for this target host.
#
# Example:
#
# Define four shared infrastructure hosts with different "container_tech":
#
shared-infra_hosts:
  infra1:
    ip: 172.20.16.33
    container_vars:
      container_tech: nspawn
  infra2:
    ip: 172.20.16.34
    container_vars:
      container_tech: lxc
  infra3:
    ip: 172.20.16.25
    container_vars:
      container_tech: nspawn
  infra4:
    ip: 172.20.16.36
#
# --------
#
# Level: repo-infra_hosts (required)
# List of target hosts on which to deploy the package repository. Recommend
# minimum three target hosts for this service. Typically contains the same
# target hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four package repository hosts:
#
repo-infra_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
#
# --------
#
# Level: os-infra_hosts (required)
# List of target hosts on which to deploy the glance API, nova API, heat API,
# and horizon. Recommend three minimum target hosts for these services.
# Typically contains the same target hosts as 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four OpenStack infrastructure hosts:
#
os-infra_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
# --------
#
# Level: identity_hosts (required)
# List of target hosts on which to deploy the keystone service. Recommend
# three minimum target hosts for this service. Typically contains the same
# target hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four OpenStack identity hosts:
#
identity_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
#
# --------
#
# Level: network_hosts (required)
# List of target hosts on which to deploy neutron services. Recommend three
# minimum target hosts for this service. Typically contains the same target
# hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four OpenStack network hosts:
#
network_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
#
# --------
#
# Level: compute_hosts (optional)
# List of target hosts on which to deploy the nova compute service. Recommend
# one minimum target host for this service. Typically contains target hosts
# that do not reside in other levels.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define two OpenStack compute hosts:
#
compute_hosts:
  compute1:
    ip: 172.20.16.37
  compute2:
    ip: 172.20.16.38
#
# --------
#
# Level: ironic-compute_hosts (optional)
# List of target hosts on which to deploy the nova compute service for Ironic.
# Recommend one minimum target host for this service. Typically contains target
# hosts that do not reside in other levels.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define an OpenStack compute host:
#
# ironic-compute_hosts:
# ironic-infra1:
# ip: 172.29.236.121
#
# --------
#
# Level: storage-infra_hosts (required)
# List of target hosts on which to deploy the cinder API. Recommend three
# minimum target hosts for this service. Typically contains the same target
# hosts as the 'shared-infra_hosts' level.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define four OpenStack storage infrastructure hosts:
#
storage-infra_hosts:
  infra1:
    ip: 172.20.16.33
  infra2:
    ip: 172.20.16.34
  infra3:
    ip: 172.20.16.25
  infra4:
    ip: 172.20.16.36
#
# --------
#
# Level: storage_hosts (required)
# List of target hosts on which to deploy the cinder volume service. Recommend
# one minimum target host for this service. Typically contains target hosts
# that do not reside in other levels.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Level: container_vars (required)
# Contains storage options for this target host.
#
# Option: cinder_storage_availability_zone (optional, string)
# Cinder availability zone.
#
# Option: cinder_default_availability_zone (optional, string)
# If the deployment contains more than one cinder availability zone,
# specify a default availability zone.
#
# Level: cinder_backends (required)
# Contains cinder backends.
#
# Option: limit_container_types (optional, string)
# Container name string in which to apply these options. Typically
# any container with 'cinder_volume' in the name.
#
# Level: <value> (required, string)
# Arbitrary name of the backend. Each backend contains one or more
# options for the particular backend driver. The template for the
# cinder.conf file can generate configuration for any backend
# providing that it includes the necessary driver options.
#
# Option: volume_backend_name (required, string)
# Name of backend, arbitrary.
#
# The following options apply to the LVM backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver, typically
# 'cinder.volume.drivers.lvm.LVMVolumeDriver'.
#
# Option: volume_group (required, string)
# Name of LVM volume group, typically 'cinder-volumes'.
#
# The following options apply to the NFS backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver,
# 'cinder.volume.drivers.nfs.NfsDriver'.
# NB. When using NFS driver you may want to adjust your
# env.d/cinder.yml file to run cinder-volumes in containers.
#
# Option: nfs_shares_config (optional, string)
# File containing list of NFS shares available to cinder, typically
# '/etc/cinder/nfs_shares'.
#
# Option: nfs_mount_point_base (optional, string)
# Location in which to mount NFS shares, typically
# '$state_path/mnt'.
#
# Option: nfs_mount_options (optional, string)
# Mount options used for the NFS mount points.
#
# Option: shares (required)
# List of shares to populate the 'nfs_shares_config' file. Each share
# uses the following format:
# - { ip: "{{ ip_nfs_server }}", share: "/vol/cinder" }
#
# The following options apply to the NetApp backend driver:
#
# Option: volume_driver (required, string)
# Name of volume driver,
# 'cinder.volume.drivers.netapp.common.NetAppDriver'.
# NB. When using NetApp drivers you may want to adjust your
# env.d/cinder.yml file to run cinder-volumes in containers.
#
# Option: netapp_storage_family (required, string)
# Access method, typically 'ontap_7mode' or 'ontap_cluster'.
#
# Option: netapp_storage_protocol (required, string)
# Transport method, typically 'scsi' or 'nfs'. NFS transport also
# requires the 'nfs_shares_config' option.
#
# Option: nfs_shares_config (required, string)
# For NFS transport, name of the file containing shares. Typically
# '/etc/cinder/nfs_shares'.
#
# Option: netapp_server_hostname (required, string)
# NetApp server hostname.
#
# Option: netapp_server_port (required, integer)
# NetApp server port, typically 80 or 443.
#
# Option: netapp_login (required, string)
# NetApp server username.
#
# Option: netapp_password (required, string)
# NetApp server password.
#
# Example:
#
# Define an OpenStack storage host:
#
# storage_hosts:
# lvm-storage1:
# ip: 172.29.236.131
#
# Example:
#
# Use the LVM iSCSI backend in availability zone 'cinderAZ_1':
#
# container_vars:
# cinder_storage_availability_zone: cinderAZ_1
# cinder_default_availability_zone: cinderAZ_1
# cinder_backends:
# lvm:
# volume_backend_name: LVM_iSCSI
# volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
# volume_group: cinder-volumes
# iscsi_ip_address: "{{ cinder_storage_address }}"
# limit_container_types: cinder_volume
#
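# Example:
#
# Use the NFS backend in availability zone 'cinderAZ_1'. This is an
# illustrative sketch assembled from the NFS options documented above; the
# NFS server address and export path are assumptions, not part of this
# deployment:
#
# container_vars:
#   cinder_storage_availability_zone: cinderAZ_1
#   cinder_default_availability_zone: cinderAZ_1
#   cinder_backends:
#     limit_container_types: cinder_volume
#     nfs_volume:
#       volume_backend_name: NFS_VOLUME1
#       volume_driver: cinder.volume.drivers.nfs.NfsDriver
#       nfs_shares_config: /etc/cinder/nfs_shares
#       nfs_mount_point_base: $state_path/mnt
#       shares:
#         - { ip: "172.29.244.15", share: "/vol/cinder" }
#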
# Example:
#
# Use the NetApp iSCSI backend via Data ONTAP 7-mode in availability zone
# 'cinderAZ_2':
#
# container_vars:
# cinder_storage_availability_zone: cinderAZ_2
# cinder_default_availability_zone: cinderAZ_1
# cinder_backends:
# netapp:
# volume_backend_name: NETAPP_iSCSI
# volume_driver: cinder.volume.drivers.netapp.common.NetAppDriver
# netapp_storage_family: ontap_7mode
# netapp_storage_protocol: iscsi
# netapp_server_hostname: hostname
# netapp_server_port: 443
# netapp_login: username
# netapp_password: password
#
# Example
#
# Use the QNAP iSCSI backend in availability zone
# 'cinderAZ_2':
#
# container_vars:
# cinder_storage_availability_zone: cinderAZ_2
# cinder_default_availability_zone: cinderAZ_1
# cinder_backends:
# limit_container_types: cinder_volume
# qnap:
# volume_backend_name: "QNAP 1 VOLUME"
# volume_driver: cinder.volume.drivers.qnap.QnapISCSIDriver
# qnap_management_url : http://10.10.10.5:8080
# qnap_poolname: "Storage Pool 1"
# qnap_storage_protocol: iscsi
# qnap_server_port: 8080
# iscsi_ip_address: 172.29.244.5
# san_login: username
# san_password: password
# san_thin_provision: True
#
#
# Example:
#
# Use the ceph RBD backend in availability zone 'cinderAZ_3':
#
# container_vars:
# cinder_storage_availability_zone: cinderAZ_3
# cinder_default_availability_zone: cinderAZ_1
# cinder_backends:
# limit_container_types: cinder_volume
# volumes_hdd:
# volume_driver: cinder.volume.drivers.rbd.RBDDriver
# rbd_pool: volumes_hdd
# rbd_ceph_conf: /etc/ceph/ceph.conf
# rbd_flatten_volume_from_snapshot: 'false'
# rbd_max_clone_depth: 5
# rbd_store_chunk_size: 4
# rados_connect_timeout: -1
# volume_backend_name: volumes_hdd
# rbd_user: "{{ cinder_ceph_client }}"
# rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
#
#
# Example:
#
# Use the cephfs (NATIVE) backend with manila:
#
# container_vars:
# manila_default_share_type: cephfs1
# manila_backends:
# cephfs1:
# driver_handles_share_servers: False
# share_backend_name: CEPHFS1
# share_driver: manila.share.drivers.cephfs.driver.CephFSDriver
# cephfs_conf_path: /etc/ceph/ceph.conf
# cephfs_auth_id: manila
# cephfs_cluster_name: ceph
# cephfs_enable_snapshots: False
# filter_function: "share.size >= 0"
# goodness_function: "share.size >= 0"
#
#
# Use the cephfs + NFS backend with manila:
#
# container_vars:
# manila_default_share_type: cephfsnfs1
# manila_backends:
# cephfsnfs1:
# driver_handles_share_servers: False
# share_backend_name: CEPHFSNFS1
# share_driver: manila.share.drivers.cephfs.driver.CephFSDriver
# cephfs_ganesha_server_ip: 172.16.24.200
# cephfs_protocol_helper_type: NFS
# cephfs_conf_path: /etc/ceph/ceph.conf
# cephfs_auth_id: manila
# filter_function: "share.size >= 0"
# goodness_function: "share.size >= 0"
#
#
# Example:
#
# Use the lvm backend with manila:
#
# container_vars:
# manila_default_share_type: nfs-share1
# manila_backends:
# nfs-share1:
# share_backend_name: NFS_SHARE1
# share_driver: manila.share.drivers.lvm.LVMShareDriver
# driver_handles_share_servers: False
# lvm_share_volume_group: manila-shares
# lvm_share_export_ip: "10.1.1.1"
# filter_function: "share.size >= 0"
# goodness_function: "share.size >= 0"
#
#
#
# Example:
#
# Use the generic backend with manila:
#
# container_vars:
# manila_default_share_type: generic
# manila_backends:
# generic:
# share_backend_name: GENERIC
# share_driver: manila.share.drivers.generic.GenericShareDriver
# driver_handles_share_servers: True
# service_instance_flavor_id: 100
# service_image_name: manila-service-image
# service_instance_user: manila
# service_instance_password: manila
# interface_driver: manila.network.linux.interface.BridgeInterfaceDriver
# filter_function: "share.size >= 0"
# goodness_function: "share.size >= 0"
#
# --------
#
# Level: log_hosts (optional)
# List of target hosts on which to deploy logging services. Recommend
# one minimum target host for this service.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
# Example:
#
# Define a logging host:
#
log_hosts:
  log1:
    ip: 172.20.16.204
#
# --------
#
# Level: haproxy_hosts (optional)
# List of target hosts on which to deploy HAProxy. Recommend at least one
# target host for this service if hardware load balancers are not being
# used.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
#
# Example:
#
# Define a virtual load balancer (HAProxy):
#
# While HAProxy can be used as a virtual load balancer, it is recommended to use
# a physical load balancer in a production environment.
#
# haproxy_hosts:
# lb1:
# ip: 172.29.236.100
# lb2:
# ip: 172.29.236.101
#
# In the above scenario (multiple hosts), HAProxy can be deployed in a
# highly available manner by installing keepalived.
#
# To make keepalived work, edit at least the following variables
# in ``user_variables.yml``:
# haproxy_keepalived_external_vip_cidr: 192.168.0.4/25
# haproxy_keepalived_internal_vip_cidr: 172.29.236.54/16
# haproxy_keepalived_external_interface: br-flat
# haproxy_keepalived_internal_interface: br-mgmt
#
# To always deploy (or upgrade to) the latest stable version of keepalived,
# edit ``/etc/openstack_deploy/user_variables.yml``:
# keepalived_package_state: latest
#
# The group_vars/all/keepalived.yml contains the keepalived
# variables that are fed into the keepalived role during
# the haproxy playbook.
# You can change the keepalived behavior for your
# deployment. Refer to the ``user_variables.yml`` file for
# more information.
#
# Keepalived can ping a public IP address to check its status. To enable this
# feature, set the ``keepalived_ping_address`` variable in the
# ``user_variables.yml`` file.
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
### This file contains commonly used overrides for convenience. Please inspect
### the defaults for each role to find additional override options.
###
## Debug and Verbose options.
debug: false
## Set the service setup host
# The default is to use localhost (the deploy host where ansible runs),
# but any other host can be used. If using an alternative host with all
# required libraries in a venv (e.g. the utility container), then the
# python interpreter needs to be set. If it is not set, the default is
# the system python interpreter.
# If you wish to use the first utility container in the inventory for
# all service setup tasks, uncomment the following.
#
#openstack_service_setup_host: "{{ groups['utility_all'][0] }}"
#openstack_service_setup_host_python_interpreter: "/openstack/venvs/utility-{{ openstack_release }}/bin/python"
## Installation method for OpenStack services
# Default option (source) is to install the OpenStack services using pip
# packages. An alternative method (distro) is to use the distribution cloud
# repositories to install OpenStack using distribution packages.
install_method: source
## Common Glance Overrides
# Set glance_default_store to "swift" if using Cloud Files backend
# or "rbd" if using ceph backend; the latter will trigger ceph to get
# installed on glance. If using a file store, a shared file store is
# recommended. See the OpenStack-Ansible install guide and the OpenStack
# documentation for more details.
# Note that "swift" is automatically set as the default back-end if there
# are any swift hosts in the environment. Use this setting to override
# this automation if you wish for a different default back-end.
# glance_default_store: file
## Ceph pool name for Glance to use
glance_rbd_store_pool: images
glance_rbd_store_chunk_size: 8
## Common Nova Overrides
# When nova_libvirt_images_rbd_pool is defined, ceph will be installed on nova
# hosts.
# nova_libvirt_images_rbd_pool: vms
# If you wish to change the dhcp_domain configured for both nova and neutron
# dhcp_domain: openstacklocal
## Common Glance Overrides when using a Swift back-end
# By default when 'glance_default_store' is set to 'swift' the playbooks will
# expect to use the Swift back-end that is configured in the same inventory.
# If the Swift back-end is not in the same inventory (i.e. it is already set up
# through some other means) then these settings should be used.
#
# NOTE: Ensure that the auth version matches your authentication endpoint.
#
# NOTE: If the password for glance_swift_store_key contains a dollar sign ($),
# it must be escaped with an additional dollar sign ($$), not a backslash. For
# example, a password of "super$ecure" would need to be entered as
# "super$$ecure" below. See Launchpad Bug #1259729 for more details.
#
# glance_swift_store_auth_version: 3
# glance_swift_store_auth_address: "https://some.auth.url.com"
# glance_swift_store_user: "OPENSTACK_TENANT_ID:OPENSTACK_USER_NAME"
# glance_swift_store_key: "OPENSTACK_USER_PASSWORD"
# glance_swift_store_container: "NAME_OF_SWIFT_CONTAINER"
# glance_swift_store_region: "NAME_OF_REGION"
## Common Ceph Overrides
# ceph_mons:
# - 10.16.5.40
# - 10.16.5.41
# - 10.16.5.42
## Custom Ceph Configuration File (ceph.conf)
# By default, your deployment host will connect to one of the mons defined above to
# obtain a copy of your cluster's ceph.conf. If you prefer, uncomment ceph_conf_file
# and customise to avoid ceph.conf being copied from a mon.
ceph_conf_file: |
  [global]
  fsid = f9f051cc-e8b7-4938-a76d-d15ab2ef48ea
  mon_initial_members = mon01_kns2, mon02_kns2, mon03_kns2
  mon_host = 172.16.32.16,172.16.32.17,172.16.32.18
  # optionally, you can use this construct to avoid defining this list twice:
  # mon_host = {{ ceph_mons|join(',') }}
  auth_cluster_required = cephx
  auth_service_required = cephx
  public_network = 172.16.32.0/24
  mon_allow_pool_delete = true
  [mds.mon01_kns2]
  host = mon01_kns2
  [mds.mon02_kns2]
  host = mon02_kns2
  [mds.mon03_kns2]
  host = mon03_kns2
  [client]
  rbd cache = true
  rbd cache writethrough until flush = true
# By default, openstack-ansible configures all OpenStack services to talk to
# RabbitMQ over encrypted connections on port 5671. To opt-out of this default,
# set the rabbitmq_use_ssl variable to 'false'. The default setting of 'true'
# is highly recommended for securing the contents of RabbitMQ messages.
# rabbitmq_use_ssl: false
# The RabbitMQ management plugin is enabled by default; the guest user has been
# removed for security reasons and a new userid 'monitoring' has been created
# with the 'monitoring' user tag. In order to modify the userid, uncomment the
# following and change 'monitoring' to your userid of choice.
# rabbitmq_monitoring_userid: monitoring
## Additional pinning generator that will allow for more packages to be pinned as you see fit.
## All pins allow for package and versions to be defined. Be careful using this as versions
## are always subject to change and updates regarding security will become your problem from this
## point on. Pinning can be done based on a package version, release, or origin. Use "*" in the
## package name to indicate that you want to pin all packages to a particular constraint.
# apt_pinned_packages:
# - { package: "lxc", version: "1.0.7-0ubuntu0.1" }
# - { package: "libvirt-bin", version: "1.2.2-0ubuntu13.1.9" }
# - { package: "rabbitmq-server", origin: "www.rabbitmq.com" }
# - { package: "*", release: "MariaDB" }
## Environment variable settings
# This allows users to specify additional environment variables to be set,
# which is useful when working behind a proxy. If working behind
# a proxy, it's important to always specify the scheme as "http://". This is what
# the underlying python libraries will handle best. This proxy information will be
# placed both on the hosts and inside the containers.
## Example environment variable setup:
## This is used by apt-cacher-ng to download apt packages:
# proxy_env_url: http://username:pa$$w0rd@10.10.10.9:9000/
## (1) This sets up a permanent environment, used during and after deployment:
# no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# global_environment_variables:
# HTTP_PROXY: "{{ proxy_env_url }}"
# HTTPS_PROXY: "{{ proxy_env_url }}"
# NO_PROXY: "{{ no_proxy_env }}"
# http_proxy: "{{ proxy_env_url }}"
# https_proxy: "{{ proxy_env_url }}"
# no_proxy: "{{ no_proxy_env }}"
#
## (2) This is applied only during deployment, nothing is left after deployment is complete:
# deployment_environment_variables:
# http_proxy: "{{ proxy_env_url }}"
# https_proxy: "{{ proxy_env_url }}"
# no_proxy: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['keystone_all'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
## SSH connection wait time
# If an increased delay for the ssh connection check is desired,
# uncomment this variable and set it appropriately.
#ssh_delay: 5
## HAProxy and keepalived
# All the previous variables are used inside a var, in the group vars.
# You can override the current keepalived definition (see
# group_vars/all/keepalived.yml) in your user space if necessary.
#
# Uncomment this to disable keepalived installation (cf. documentation)
# haproxy_use_keepalived: False
#
# HAProxy Keepalived configuration (cf. documentation)
# Make sure that this is set correctly according to the CIDR used for your
# internal and external addresses.
# haproxy_keepalived_external_vip_cidr: "{{external_lb_vip_address}}/32"
# haproxy_keepalived_internal_vip_cidr: "{{internal_lb_vip_address}}/32"
# haproxy_keepalived_external_interface:
# haproxy_keepalived_internal_interface:
# Defines the default VRRP id used for keepalived with haproxy.
# Override it to make sure you don't overlap
# with existing VRRP ids on your network. Default is 10 for the external and 11 for the
# internal VRRP instance.
# haproxy_keepalived_external_virtual_router_id:
# haproxy_keepalived_internal_virtual_router_id:
# Defines the VRRP master/backup priority. Defaults respectively to 100 and 20
# haproxy_keepalived_priority_master:
# haproxy_keepalived_priority_backup:
# Keepalived default IP address used to check its alive status (IPv4 only)
# keepalived_ping_address: "193.0.14.129"