Appendix A: Sample Configuration Files

18-Apr-23
Note:

AppFormix was renamed to Contrail Insights, but is still called appformix in the YAML configuration files.

Sample site.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

global:
  # List of DNS nameservers
  dns:
    # Google Public DNS
    - "8.8.8.8"
    - "8.8.4.4"
  # List of NTP time servers
  ntp:
    # public pool.ntp.org
    - "0.pool.ntp.org"
    - "1.pool.ntp.org"
    - "2.pool.ntp.org"
    - "3.pool.ntp.org"
  # Timezone for all servers
  timezone: 'America/Los_Angeles'
  rhel:
    # Contrail Cloud Activation Key
    # These details are provided when you request an activation key from
    # contrail cloud subscriptions <contrail_cloud_subscriptions@juniper.net>
    #
    satellite:
      #SATELLITE_KEY should be defined in vault-data.yml file
      #SATELLITE_ORG
      organization: "ContrailCloud16"
      #SATELLITE_FQDN
      fqdn: contrail-cloud.juniper.net
  # DNS domain information.
  # Must be unique for every deployment to avoid name conflicts.
  # Need not be a registered DNS domain.
  domain: "my.unique.domain"

jumphost:
  network:
    # network used for provisioning (PXE booting) servers
    provision:
      # jumphost nic to be used for provisioning (PXE booting) servers
      nic: eno1

control_hosts:
  # Contains a list of label to disk mappings for roles
  disk_mapping:
    # the control host always uses the "baremetal" role
    baremetal:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   name: disk device path (e.g. /dev/sdb)
      #   OR
      #   hctl: alternative notation for disk paths specifying the SCSI address
      #    (Host, Channel, Target and Lun). The HCTL can be found with the
      #    lsscsi command, or it can be found in the introspection data
      #
      - label: spinning-0
        name: /dev/sdb
      - label: spinning-1
        name: /dev/sdc
      - label: spinning-2
        name: /dev/sdd
      - label: spinning-3
        name: /dev/sde
      - label: ssd-0
        hctl: "0:2:3:0"
  storage:
    # Define a set of disk groups that can be referenced for VM virtual disk allocations
    # These become virsh storage pools on the control host
    # Each pool has:
    #   mountpoint: "/absolute/path/where/lvm/will/get/mounted"
    #   type: Either "dir" or "logical".
    #       "dir" does not create any new volumes;
    #         it is useful when one large hardware RAID is used as /
    #       "logical" is an LVM volume placed on the disks listed under "disk".
    #   disk: List of disk devices to use for the pool
    hdd_storage:
      mountpoint: "/srv/hdd_storage"
      type: logical
      disk:
        - "/dev/disk/by-alias/spinning-0"
        - "/dev/disk/by-alias/spinning-1"
        - "/dev/disk/by-alias/spinning-2"
        - "/dev/disk/by-alias/spinning-3"
    ssd_storage:
      mountpoint: "/srv/ssd_storage"
      type: logical
      disk:
        - "/dev/disk/by-alias/ssd-0"
    #srv:
    #  mountpoint: "/srv"
    #  type: dir

  vm:
    # VM for Openstack Controller role
    control:
      disk:
        # Root disk
        vda:
          # Virsh storage pool (see storage above)
          pool: hdd_storage
    # VMs for ContrailController role
    contrail-k8s:
      disk:
        # Root disk
        vda:
          # Virsh storage pool (see storage above)
          pool: hdd_storage
    # VM for ContrailTsn role
    contrail-tsn:
      disk:
        # Root disk
        vda:
          # Virsh storage pool (see storage above)
          pool: hdd_storage
    # VM for AppFormix controller role
    appformix-controller:
      disk:
        # Root disk
        vda:
          # Virsh storage pool (see storage above)
          pool: hdd_storage

compute_hosts:
  sriov:
    # Enable SR-IOV support
    enabled: true
    # Enable SR-IOV together with DPDK
    # Contrail vRouter mode:
    #   supported values: "dpdk" for the DPDK vRouter; anything else means the kernel vRouter
    mode: dpdk
    # SR-IOV number of VFs per interface, in "<interface>:<count>" format
    num_vf:
      - "ens2f1:7"
    # NovaPCIPassthrough settings
    pci_passthrough:
      - devname: "ens2f1"
        physical_network: "sriov1"
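    # The requested VF count must not exceed what the NIC supports; the
    # limit can be checked on the host with, for example:
    #   cat /sys/class/net/ens2f1/device/sriov_totalvfs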
  root_disk:
  # Define root disk for the listed ironic profiles.
  # The default of "/dev/sda" will be used if there is no
  # specific profile definition
  #
  # If the 'name' hint should be dropped in favor of other
  # hints, a block scalar can be used:
  #
  #    ComputeKernel0Hw0: |
  #      vendor: VendorName
  #
  # which will overwrite default values.
  # To keep the key-value structure without using the 'name' hint, set it to a
  # value which always evaluates to true, for example:
  #
  #    ComputeKernel0Hw0:
  #      name: "s!=NonExistingDevice"
  #      vendor: VendorName
  #
  # For more details please check:
  # https://docs.openstack.org/ironic/latest/install/advanced.html#specifying-the-disk-for-deployment-root-device-hints
  #
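  # The hint values (vendor, model, serial, and so on) for a given disk can
  # be read on the host with, for example:
  #   udevadm info --query=property --name=/dev/sda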
    ComputeKernel0Hw0:
      name: "/dev/sda"
    ComputeKernel0Hw1:
      name: "/dev/sda"
    ComputeKernel1Hw1:
      name: "/dev/sda"
    ComputeKernel1Hw0:
      name: "/dev/sda"
    ComputeDpdk0Hw2:
      name: "/dev/sda"
    ComputeDpdk1Hw3:
      name: "/dev/sda"
    ComputeSriov0Hw4:
      name: "/dev/sda"
    ComputeSriov1Hw5:
      name: "/dev/sda"
  resource:
    minimal_disk:
    # This value will be used as the local_gb size for the listed ironic profiles
    # If not defined for a profile then the default will be used
      ComputeKernel0Hw0: 50
      ComputeKernel0Hw1: 50
      ComputeKernel1Hw1: 50
      ComputeKernel1Hw0: 50
      ComputeDpdk0Hw2: 50
      ComputeDpdk1Hw3: 50
      ComputeSriov0Hw4: 50
      ComputeSriov1Hw5: 50

storage_hosts:
  root_disk:
  # Define root disk for the listed ironic profiles.
  # The default of "/dev/sda" will be used if there is no
  # specific profile definition
    CephStorage0Hw6:
      name: "/dev/sda"
    CephStorage1Hw7:
      name: "/dev/sda"

undercloud:
  nova:
    # Nova flavor definitions for roles
    flavor:
      CephStorage0Hw6:
        cpu: 1
        memory: 4
        disk: 40
        ephemeral: 0
      CephStorage1Hw7:
        cpu: 1
        memory: 4
        disk: 40
        ephemeral: 0
      ComputeKernel0Hw0:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeKernel0Hw1:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeKernel1Hw1:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeKernel1Hw0:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeDpdk0Hw2:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeDpdk1Hw3:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeSriov0Hw4:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0
      ComputeSriov1Hw5:
        cpu: 8
        memory: 24
        disk: 40
        ephemeral: 0

k8s:
  external_vip_ip: 10.10.10.102
  internal_api_vip_ip: 172.16.0.91
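  # Note: internal_api_vip_ip lies within the internal_api subnet
  # (172.16.0.0/24) defined under overcloud.network below, outside its
  # DHCP pool (172.16.0.100-172.16.0.160).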

overcloud:
  # Contains a list of label to disk mappings for roles.
  # When Ceph Storage is disabled, compute-related roles (Compute* and
  # ComputeDpdk* roles) will use any disks labeled with
  # "ephemeral-<digits>" for local Nova ephemeral storage.
  disk_mapping:
    ComputeKernel:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeKernel0Hw0:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeKernel1Hw0:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeKernel1Hw1:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeKernel0Hw1:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeDpdk:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeDpdk0Hw2:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeDpdk1Hw3:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeSriov:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeSriov0Hw4:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
    ComputeSriov1Hw5:
      # Mapping of labels to disk devices. The label is assigned to the disk
      # device so that the disk can be referenced by the alias in other
      # configurations, for example /dev/disk/by-alias/<label>
      # Each list element contains:
      #   label: label to assign
      #   hctl: disk device path H:C:T:L (the path must exist); see lsscsi
      - label: ephemeral-0
        hctl: '5:0:0:0'
      - label: ephemeral-1
        hctl: '6:0:0:0'
      - label: ephemeral-2
        hctl: '7:0:0:0'
      - label: ephemeral-3
        hctl: '8:0:0:0'
  extra_config:
    ComputeDpdkParameters:
      TunedProfileName: "cpu-partitioning"
      ContrailDpdkOptions: "--vr_flow_entries=2000000 --yield_option 0"
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048 isolcpus=2-9,22-29"
      IsolCpusList: "2-9,22-29"
      NovaVcpuPinSet: ['4-9','24-29']
      ContrailSettings:
        SERVICE_CORE_MASK: '0x1'
        DPDK_CTRL_THREAD_MASK: '0x1'
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
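    # Note: the KernelArgs in these parameter blocks reserve
    # 64 x 1 GiB + 2048 x 2 MiB = 68 GiB of hugepages per node; size this
    # against the host's physical memory before reusing these values.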
    ComputeDpdk0Hw2Parameters:
      TunedProfileName: "cpu-partitioning"
      ContrailDpdkOptions: "--vr_flow_entries=2000000 --yield_option 0"
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048 isolcpus=2-9,22-29"
      IsolCpusList: "2-9,22-29"
      NovaVcpuPinSet: ['4-9','24-29']
      ContrailSettings:
        SERVICE_CORE_MASK: '0x1'
        DPDK_CTRL_THREAD_MASK: '0x1'
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeDpdk1Hw3Parameters:
      TunedProfileName: "cpu-partitioning"
      ContrailDpdkOptions: "--vr_flow_entries=2000000 --yield_option 0"
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048 isolcpus=2-9,22-29"
      IsolCpusList: "2-9,22-29"
      NovaVcpuPinSet: ['4-9','24-29']
      ContrailSettings:
        SERVICE_CORE_MASK: '0x1'
        DPDK_CTRL_THREAD_MASK: '0x1'
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeKernelParameters:
      ContrailVrouterHugepages1GB: 64
      ContrailVrouterHugepages2MB: 2048
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048"
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeKernel0Hw0Parameters:
      ContrailVrouterHugepages1GB: 64
      ContrailVrouterHugepages2MB: 2048
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048"
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeKernel0Hw1Parameters:
      ContrailVrouterHugepages1GB: 64
      ContrailVrouterHugepages2MB: 2048
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048"
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeKernel1Hw0Parameters:
      ContrailVrouterHugepages1GB: 64
      ContrailVrouterHugepages2MB: 2048
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048"
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960
    ComputeKernel1Hw1Parameters:
      ContrailVrouterHugepages1GB: 64
      ContrailVrouterHugepages2MB: 2048
      KernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=64 hugepagesz=2M hugepages=2048"
      ExtraSysctlSettings:
        vm.nr_hugepages:
          value: 64
        vm.max_map_count:
          value: 128960

  network:
    # The external network is used for accessing the overcloud APIs from outside the infrastructure.
    external:
      # Network name used by TripleO Heat Templates
      heat_name: External
      # CIDR (IP/prefix) for the external network subnet
      # Corresponds to the ExternalIpSubnet heat property
      cidr: "10.84.36.64/28"
      # Default route for the external network
      # Corresponds to the ExternalInterfaceDefaultRoute heat property
      gateway: "10.84.36.78"
      # VLAN tag for the external network
      # Corresponds to the ExternalNetworkVlanID heat property
      vlan: 1350
      # Floating virtual IP for the Openstack APIs on the external network
      # Corresponds to the PublicVirtualFixedIPs heat property
      vip: "10.84.36.77"
      # DHCP pool for the external network
      # Be sure that the range is large enough to accommodate all nodes in the external network
      pool:
        # Range start for the DHCP pool
        start: "10.84.36.65"
        # Range end for the DHCP pool
        end: "10.84.36.75"
      # MTU for external network
      # Corresponds to the ExternalNetworkMtu heat property
      mtu: 9000
      # List of roles that can be on this network
      role:
        - Controller
        - AppformixController
    # The internal API network is used for control plane signalling and service API calls
    internal_api:
      # Network name used by TripleO Heat Templates
      heat_name: InternalApi
      # VLAN tag for the internal API network
      # Corresponds to the InternalApiNetworkVlanID heat property
      vlan: 1200
      # CIDR (IP/prefix) for the internal api supernet network subnet
      # Corresponds to the InternalApiSupernet heat property
      # The supernet is used in spine/leaf configurations
      # The supernet accommodates all related leaf networks, e.g. internal_api0 and internal_api1
      # The supernet is used to create static routes between leaves
      # The supernet is defined only for the main network, not per leaf
      supernet: "172.16.0.0/22"
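      # For example, the 172.16.0.0/22 supernet above covers the main
      # 172.16.0.0/24 subnet together with the leaf subnets 172.16.1.0/24
      # (internal_api0) and 172.16.2.0/24 (internal_api1) defined below.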
      # CIDR (IP/prefix) for the internal api network subnet
      # Corresponds to the InternalApiIpSubnet heat property
      cidr: "172.16.0.0/24"
      # Default route for the internal api network
      # Corresponds to the InternalApiInterfaceDefaultRoute heat property
      gateway: 172.16.0.1
      # MTU for internal api network
      # Corresponds to the InternalApiNetworkMtu heat property
      mtu: 9000
      # DHCP pool for the internal api network
      # Be sure that the range is large enough to accommodate all nodes in the internal api network
      pool:
        # Range start for the DHCP pool
        start: 172.16.0.100
        # Range end for the DHCP pool
        end: 172.16.0.160
      # Floating virtual IP for the Openstack APIs on the internal api network
      # Corresponds to the InternalApiVirtualFixedIPs heat property
      vip: 172.16.0.90
      # List of roles that can be on this network
      role:
        - Controller
        - ContrailTsn
        - AppformixController
        - ComputeKernel
        - ComputeDpdk
        - ComputeSriov
    # Leaf 0 subnet of the internal_api network
    internal_api0:
      # Network name used by TripleO Heat Templates
      heat_name: InternalApi0
      # VLAN tag for the internal API 0 network
      # Corresponds to the InternalApi0NetworkVlanID heat property
      vlan: 1201
      # CIDR (IP/prefix) for the internal api 0 network subnet
      # Corresponds to the InternalApi0IpSubnet heat property
      cidr: "172.16.1.0/24"
      # Default route for the internal api 0 network
      # Corresponds to the InternalApi0InterfaceDefaultRoute heat property
      gateway: 172.16.1.1
      # MTU for internal api 0 network
      # Corresponds to the InternalApi0NetworkMtu heat property
      mtu: 9000
      # DHCP pool for the internal api 0 network
      # Be sure that the range is large enough to accommodate all nodes in the internal api network
      pool:
        # Range start for the DHCP pool
        start: 172.16.1.100
        # Range end for the DHCP pool
        end: 172.16.1.200
      # List of roles that can be on this network
      role:
        - ComputeKernel0Hw0
        - ComputeKernel0Hw1
        - ComputeSriov0Hw4
        - ComputeDpdk0Hw2
    # Leaf 1 subnet of the internal_api network
    internal_api1:
      # Network name used by TripleO Heat Templates
      heat_name: InternalApi1
      # VLAN tag for the internal API 1 network
      # Corresponds to the InternalApi1NetworkVlanID heat property
      vlan: 1202
      # CIDR (IP/prefix) for the internal api 1 network subnet
      # Corresponds to the InternalApi1IpSubnet heat property
      cidr: "172.16.2.0/24"
      # Default route for the internal api 1 network
      # Corresponds to the InternalApi1InterfaceDefaultRoute heat property
      gateway: 172.16.2.1
      # MTU for internal api 1 network
      # Corresponds to the InternalApi1NetworkMtu heat property
      mtu: 9000
      # DHCP pool for the internal api 1 network
      # Be sure that the range is large enough to accommodate all nodes in the internal api network
      pool:
        # Range start for the DHCP pool
        start: 172.16.2.100
        # Range end for the DHCP pool
        end: 172.16.2.200
      # List of roles that can be on this network
      role:
        - ComputeSriov1Hw5
        - ComputeKernel1Hw0
        - ComputeDpdk1Hw3
        - ComputeKernel1Hw1
    # The management network is defined for backwards-compatibility in RHOSP and is not
    # used by default by any roles.
    management:
      # Network name used by TripleO Heat Templates
      heat_name: Management
      # VLAN tag for the management network
      # Corresponds to the ManagementNetworkVlanID heat property
      vlan: 1300
      # CIDR (IP/prefix) for the network subnet
      # Corresponds to the ManagementIpSubnet heat property
      cidr: "192.168.1.0/24"
      # MTU for the network
      # Corresponds to the ManagementNetworkMtu heat property
      mtu: 9000
      # DHCP pool for the network
      # Be sure that the range is large enough to accommodate all nodes in the network
      pool:
        # Range start for the DHCP pool
        start: 192.168.1.100
        # Range end for the DHCP pool
        end: 192.168.1.200
    # The storage network is used for Compute storage access
    storage:
      # Network name used by TripleO Heat Templates
      heat_name: Storage
      # VLAN tag for the storage network
      # Corresponds to the StorageNetworkVlanID heat property
      vlan: 1500
      supernet: "172.16.16.0/22"
      cidr: "172.16.16.0/24"
      gateway: 172.16.16.1
      mtu: 9000
      pool:
        start: 172.16.16.100
        end: 172.16.16.200
      # List of roles that can be on this network
      role:
        - Controller
        - CephStorage
        - ComputeKernel
        - ComputeDpdk
        - ComputeSriov
        - ContrailTsn
    # Leaf 0 subnet of the storage network
    storage0:
      # Network name used by TripleO Heat Templates
      heat_name: Storage0
      vlan: 1501
      cidr: "172.16.17.0/24"
      gateway: 172.16.17.1
      mtu: 9000
      pool:
        start: 172.16.17.100
        end: 172.16.17.200
      # List of roles that can be on this network
      role:
        - CephStorage0Hw6
        - ComputeKernel0Hw0
        - ComputeKernel0Hw1
        - ComputeDpdk0Hw2
        - ComputeSriov0Hw4
    # Leaf 1 subnet of the storage network
    storage1:
      # Network name used by TripleO Heat Templates
      heat_name: Storage1
      vlan: 1502
      cidr: "172.16.18.0/24"
      gateway: 172.16.18.1
      mtu: 9000
      pool:
        start: 172.16.18.100
        end: 172.16.18.200
      # List of roles that can be on this network
      role:
        - ComputeSriov1Hw5
        - ComputeKernel1Hw0
        - ComputeDpdk1Hw3
        - ComputeKernel1Hw1
        - CephStorage1Hw7
    # The storage management network is used for storage operations such as replication
    storage_mgmt:
      # Network name used by TripleO Heat Templates
      heat_name: StorageMgmt
      # VLAN tag for the storage management network
      # Corresponds to the StorageMgmtNetworkVlanID heat property
      vlan: 1450
      supernet: "172.16.20.0/22"
      cidr: "172.16.20.0/24"
      gateway: 172.16.20.1
      vip_enable: false
      mtu: 9000
      pool:
        start: 172.16.20.100
        end: 172.16.20.200
      # List of roles that can be on this network
      role:
        - Controller
    # Leaf 0 subnet of the storage_mgmt network
    storage_mgmt0:
      # Network name used by TripleO Heat Templates
      heat_name: StorageMgmt0
      vlan: 1451
      cidr: "172.16.21.0/24"
      gateway: 172.16.21.1
      mtu: 9000
      pool:
        start: 172.16.21.100
        end: 172.16.21.200
      # List of roles that can be on this network
      role:
        - CephStorage0Hw6
    # Leaf 1 subnet of the storage_mgmt network
    storage_mgmt1:
      # Network name used by TripleO Heat Templates
      heat_name: StorageMgmt1
      vlan: 1452
      cidr: "172.16.22.0/24"
      gateway: 172.16.22.1
      mtu: 9000
      pool:
        start: 172.16.22.100
        end: 172.16.22.200
      # List of roles that can be on this network
      role:
        - CephStorage1Hw7
    # The tenant network is used for tenant workload data
    tenant:
      # Network name used by TripleO Heat Templates
      heat_name: Tenant
      # VLAN tag for the tenant network
      # Corresponds to the TenantNetworkVlanID heat property
      vlan: 1250
      supernet: "172.16.80.0/21"
      cidr: "172.16.81.0/24"
      gateway: 172.16.81.1
      vrouter_gateway: 172.16.81.1
      mtu: 9000
      pool:
        start: 172.16.81.100
        end: 172.16.81.200
      # List of roles that can be on this network
      role:
        - ContrailTsn
        - ComputeKernel
        - ComputeDpdk
        - ComputeSriov
    # Leaf 0 subnet of the tenant network
    tenant0:
      # Network name used by TripleO Heat Templates
      heat_name: Tenant0
      vlan: 1251
      cidr: "172.16.82.0/24"
      gateway: 172.16.82.1
      vrouter_gateway: 172.16.82.1
      mtu: 9000
      pool:
        start: 172.16.82.100
        end: 172.16.82.200
      # List of roles that can be on this network
      role:
        - ComputeKernel0Hw0
        - ComputeKernel0Hw1
        - ComputeDpdk0Hw2
        - ComputeSriov0Hw4
    # Leaf 1 subnet of the tenant network
    tenant1:
      # Network name used by TripleO Heat Templates
      heat_name: Tenant1
      vlan: 1252
      cidr: "172.16.83.0/24"
      gateway: 172.16.83.1
      vrouter_gateway: 172.16.83.1
      mtu: 9000
      pool:
        start: 172.16.83.100
        end: 172.16.83.200
      # List of roles that can be on this network
      role:
        - ComputeSriov1Hw5
        - ComputeKernel1Hw0
        - ComputeDpdk1Hw3
        - ComputeKernel1Hw1
  # Contrail-specific settings
  #contrail:
  #  aaa_mode: cloud-admin
  #  vrouter:
  #    contrail_settings:
  #      # Settings per profile.
  #      # A profile's contrail_settings replace the default settings and should
  #      # include all keys and values that are intended to be exported for the
  #      # given role. When leaves are used, per-profile configuration is implied,
  #      # as the VROUTER_GATEWAY for a profile is derived by querying the node's
  #      # tenant network for its vrouter_gateway value.
  #      default:
  #        VROUTER_GATEWAY: 172.16.81.254
  #        BGP_ASN: 64512
  #        LACP_RATE: 1
  #      ComputeKernel1Hw0:
  #        LACP_RATE: 1

  # Information used to generate the SSL certificates for the public Openstack service APIs
  tls:
    # SSL key size in bits for CA generation
    ca_key_size: 2048
    # SSL key size in bits for SSL of the OpenStack external VIP
    ssl_key_size: 2048
    #countryName_default
    country: "US"
    #stateOrProvinceName_default
    state: "CA"
    #localityName_default
    city: "Sunnyvale"
    #organizationalUnitName_default
    organization: "JNPR"
    #commonName_default - this is typically the external VIP
    common_name: "10.10.10.90"
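    # The generated certificate can be inspected with, for example
    # (the certificate path is a placeholder):
    #   openssl x509 -in <certificate-file> -noout -subject -dates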

ceph:
  # Choice to enable Ceph storage in the overcloud.
  # "true" means that Ceph will be deployed as the backed for Cinder and Glance services.
  # "false" false means that Ceph will not be deployed.
  enabled: true
  # Ceph OSD disk configuration
  osd:
    # Update the Ceph crush map when OSDs are started
    crush_update_on_start: true
    # Ceph OSD disk assignments. The named disks will be exclusively used by Ceph for persistence.
    # LVM is the default scenario for Ceph deployment, with BlueStore as the backend.
    # When all named disks are of the same type, spinning or solid state, all of them will be used
    # as Ceph OSDs. When disks of mixed types are defined, the spinning disks will be used as OSDs
    # and the Ceph DB will be created on the solid state disks. For mixed disk types, the automatic
    # pgp number calculation requires assigning the key 'contents' with value 'db' to the SSD disks.
    # In the example below, disks sd[b-e] are spinning disks and sdf is a solid state disk.
    default:
      disk:
        '/dev/sdb':
        '/dev/sdc':
        '/dev/sdd':
        '/dev/sde':
        '/dev/sdf':
          contents: db
    CephStorage0Hw6:
      disk:
        '/dev/sdb':
        '/dev/sdc':
        '/dev/sdd':
        '/dev/sde':
        '/dev/sdf':
          contents: db
    CephStorage1Hw7:
      disk:
        '/dev/sdb':
        '/dev/sdc':
        '/dev/sdd':
        '/dev/sde':
        '/dev/sdf':
          contents: db
  # By default, the pgp number is calculated by Contrail Cloud. You can instead supply this
  # parameter yourself; use the calculator at https://ceph.com/pgcalc/, which also takes pool
  # utilization into account. A calculated pgp_num can be introduced in the configuration as
  # shown below, defined per pool in use.
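  # As a worked example (illustrative only): with the 8 spinning OSDs defined
  # above (4 per storage node) and replica size 3, a target of ~100 PGs per
  # OSD gives (8 * 100) / 3 ~= 267 PGs in total, rounded to a power of two and
  # divided among the pools by their expected utilization.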
  #  pool:
  #    vms:
  #      pgp_num: 32
  #    rbd:
  #      pgp_num: 32
  #    images:
  #      pgp_num: 32
  #    volumes:
  #      pgp_num: 32
  #    backups:
  #      pgp_num: 32
  #
  # The Rados Gateway, when enabled (the default behaviour), creates its own Ceph pools
  # that are not tracked by Contrail Cloud. Those pools can be predefined to better control
  # their sizes. The pool definitions below are not exhaustive; please consult
  # https://ceph.com/pgcalc/
  # Pools should have an application enabled according to their use.
  # If not changed explicitly, pools are created with the 'rbd' application assigned.
  # Available options are:
  #   - rbd for the Ceph Block Device
  #   - rgw for the Ceph Object Gateway
  #   - cephfs for the Ceph Filesystem
  # or user defined value for custom application.
  # More details can be found on
  # https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/storage_strategies_guide/pools-1#enable-application
  #    .rgw.root:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.control:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.meta:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.log:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.buckets.index:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.buckets.data:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw
  #    default.rgw.buckets.non-ec:
  #      pgp_num: 16
  #      enabled: true
  #      replica: 3
  #      application: rgw

appformix:
  # Set to true if you have multiple control hosts, which allows AppFormix to run in HA mode
  enable_ha: true
  # Floating virtual IP for the Appformix APIs on the external network, used and required by HA mode.
  vip: "10.10.10.101"
  # Floating virtual IP for the Appformix APIs on the internal network, used and required by HA mode.
  secondary_vip: "172.16.0.89"
  keepalived:
    # Set which interface will be used for VRRP
    vrrp_interface: "enp2s0"
    # Set which interface will be used for the secondary VRRP
    secondary_vrrp_interface: "vlan1200"

Sample inventory.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

# Common values shared among a group of nodes
ipmi_hardware1: &hardware1
  pm_type: "ipmi"
  pm_user: "{{ vault['inventory_nodes']['hardware1']['pm_user'] }}"
  pm_password: "{{ vault['inventory_nodes']['hardware1']['pm_password'] }}"
  capabilities: "boot_mode:uefi"

# List of baremetal server nodes that can be used for deploying the roles
# Each list item contains:
#    name: logical name to assign this resource (string)
#    pm_addr: IP address of the resource's IPMI interface (string)
#    pm_type: Ironic driver to interface with this resource (typically ipmi) (string)
#    pm_user: IPMI user account (string)
#    pm_password: IPMI account user password (string)
#    capabilities: comma-separated list of node capabilities (string).
#                  The 'profile' and 'boot_option' capabilities are managed
#                  by Contrail Cloud and will be omitted.
#                  e.g. capabilities: "boot_mode:uefi" sets the boot mode to UEFI
# 
# Values common to a group of nodes can be moved to a dedicated section such as ipmi_hardware1
# and referenced like this:
#    <<: *hardware1
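# A merged key can also be overridden per node, for example (hypothetical):
#    - name: "some-node"
#      pm_addr: "10.10.11.99"
#      <<: *hardware1
#      capabilities: "boot_mode:bios"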
inventory_nodes:
  - name: "control-host1"
    pm_addr: "10.10.11.58"
    <<: *hardware1
  - name: "control-host2"
    pm_addr: "10.10.11.59"
    <<: *hardware1
  - name: "control-host3"
    pm_addr: "10.10.11.60"
    <<: *hardware1
  - name: "storage1"
    pm_addr: "10.10.11.61"
    <<: *hardware1
  - name: "storage2"
    pm_addr: "10.10.11.62"
    <<: *hardware1
  - name: "storage3"
    pm_addr: "10.10.11.63"
    <<: *hardware1
  - name: "computedpdk1"
    pm_addr: "10.10.11.64"
    <<: *hardware1
  - name: "computedpdk2"
    pm_addr: "10.10.11.65"
    <<: *hardware1
  - name: "compute1"
    pm_addr: "10.10.11.66"
    <<: *hardware1
  - name: "compute2"
    pm_addr: "10.10.11.67"
    <<: *hardware1
  - name: "compute3"
    pm_addr: "10.10.11.68"
    <<: *hardware1
  - name: "compute4"
    pm_addr: "10.10.11.69"
    <<: *hardware1
  - name: "computesriov1"
    pm_addr: "10.10.11.70"
    <<: *hardware1
  - name: "computesriov2"
    pm_addr: "10.10.11.71"
    <<: *hardware1

Sample control-host-nodes.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

# List of nodes to use for the control host role
# Each list item contains a set of variables which can be referenced
# with "{{ host.<variable> }}" in control_host_nodes_network_config below.
# Other ad-hoc variables can be added as needed.
#    name: name of a node in the inventory (string)
#    hostname: hostname to assign the node after it is imaged (string)
#    control_ip_netmask: static CIDR address on Control Plane network.
#                        Choose a value outside the DHCP range. (string)
#    dns_server1,dns_server2: dns server addresses (string)
#    max_mtu: The largest MTU supported by an interface
#

control_host_nodes:
  - name: "control-host1"
    control_ip_netmask: "192.168.213.5/24"
    max_mtu: 9216
  - name: "control-host2"
    control_ip_netmask: "192.168.213.6/24"
    max_mtu: 9216
  - name: "control-host3"
    control_ip_netmask: "192.168.213.7/24"
    max_mtu: 9216

# Template for network layout on all control host nodes
# This follows the syntax
# https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v1.html#network-config-v1
# or
# https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html#network-config-v2
# variables from control_host_nodes can be referred to with "{{ host.<variable> }}"

control_host_nodes_network_config:
  version: 1
  config:
  - type: physical
    name: eno1
    subnets:
    - type: dhcp
    mtu: "{{ host.max_mtu }}"
    nm_controlled: true
  - type: physical
    name: eno2
    mtu: "{{ host.max_mtu }}"
    nm_controlled: true
  - type: physical
    name: ens7f0
    mtu: "{{ host.max_mtu }}"
    nm_controlled: true
  - type: physical
    name: ens7f1
    mtu: "{{ host.max_mtu }}"
    nm_controlled: true
  - type: bond
    name: bond0
    mtu: "{{ host.max_mtu }}"
    nm_controlled: true
    bond_interfaces:
      - ens7f0
      - ens7f1
    params:
      bond-mode: "802.3ad"
      xmit_hash_policy: layer3+4
      lacp_rate: fast
      miimon: "100"
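# Note: the 802.3ad bond above requires a matching LACP (link aggregation)
# configuration on the switch ports facing ens7f0 and ens7f1.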

control_hosts:
  # The mapping from control host interfaces to the control VM interfaces
  # The first interface (enp1s0) must always be the Control Plane network to allow the VM to PXE boot
  # VM interface names must be sequential with no gaps (e.g. enp1s0, enp2s0, enp3s0,...)
  vm_interfaces:
    - interface: enp1s0
      physical_interface: eno1
    - interface: enp2s0
      physical_interface: eno2
    - interface: enp3s0
      physical_interface: bond0

Sample overcloud-nics.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

Controller_network_config:
  - type: interface
    name: enp1s0
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
  - type: vlan
    device: enp1s0
    vlan_id:
      get_param: StorageNetworkVlanID
    mtu:
      get_param: StorageMtu
    addresses:
    - ip_netmask:
        get_param: StorageIpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: StorageInterfaceDefaultRoute
  - type: vlan
    device: enp1s0
    vlan_id:
      get_param: StorageMgmtNetworkVlanID
    mtu:
      get_param: StorageMgmtMtu
    addresses:
    - ip_netmask:
        get_param: StorageMgmtIpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageMgmtSupernet
      next_hop:
        get_param: StorageMgmtInterfaceDefaultRoute
  - type: vlan
    device: enp1s0
    vlan_id:
      get_param: InternalApiNetworkVlanID
    mtu:
      get_param: InternalApiMtu
    addresses:
    - ip_netmask:
        get_param: InternalApiIpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApiInterfaceDefaultRoute
  - type: interface
    name: enp2s0
    mtu:
      get_param: ExternalMtu
    addresses:
    - ip_netmask:
        get_param: ExternalIpSubnet
    routes:
    -
      default: True
      next_hop:
        get_param: ExternalInterfaceDefaultRoute
  - type: interface
    name: enp3s0
    use_dhcp: false

AppformixController_network_config:
  - type: interface
    name: enp1s0
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
  - type: vlan
    device: enp1s0
    vlan_id:
      get_param: InternalApiNetworkVlanID
    mtu:
      get_param: InternalApiMtu
    addresses:
    - ip_netmask:
        get_param: InternalApiIpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApiInterfaceDefaultRoute
  - type: interface
    name: enp2s0
    mtu:
      get_param: ExternalMtu
    addresses:
    - ip_netmask:
        get_param: ExternalIpSubnet
    routes:
    -
      default: True
      next_hop:
        get_param: ExternalInterfaceDefaultRoute
  - type: interface
    name: enp3s0
    use_dhcp: false

ContrailTsn_network_config:
  - type: interface
    name: enp1s0
    dns_servers:
      get_param: DnsServers
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    use_dhcp: false
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: enp1s0
    vlan_id:
      get_param: InternalApiNetworkVlanID
    mtu:
      get_param: InternalApiMtu
    addresses:
    - ip_netmask:
        get_param: InternalApiIpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApiInterfaceDefaultRoute
  - type: interface
    name: enp2s0
    use_dhcp: false
  - type: interface
    name: enp3s0
    use_dhcp: false
    mtu:
      get_param: TenantMtu
  - type: vlan
    device: enp3s0
    vlan_id:
      get_param: TenantNetworkVlanID
    mtu:
      get_param: TenantMtu
    use_dhcp: false
  - type: contrail_vrouter
    name: vhost0
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: TenantNetworkVlanID}
        use_dhcp: false
    mtu:
      get_param: TenantMtu
    addresses:
    - ip_netmask:
        get_param: TenantIpSubnet
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: TenantInterfaceDefaultRoute

ComputeKernel0Hw1_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage0NetworkVlanID
    mtu:
      get_param: Storage0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage0InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi0NetworkVlanID
    mtu:
      get_param: InternalApi0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi0InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant0NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant0NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant0NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant0NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant0NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant0IpSubnet
    mtu:
      get_param: Tenant0NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant0InterfaceDefaultRoute

ComputeKernel0Hw0_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage0NetworkVlanID
    mtu:
      get_param: Storage0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage0InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi0NetworkVlanID
    mtu:
      get_param: InternalApi0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi0InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant0NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant0NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant0NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant0NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant0NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant0IpSubnet
    mtu:
      get_param: Tenant0NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant0InterfaceDefaultRoute

ComputeKernel1Hw0_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage1NetworkVlanID
    mtu:
      get_param: Storage1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage1InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi1NetworkVlanID
    mtu:
      get_param: InternalApi1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi1InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant1NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant1NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant1NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant1NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant1NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant1IpSubnet
    mtu:
      get_param: Tenant1NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant1InterfaceDefaultRoute

ComputeKernel1Hw1_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage1NetworkVlanID
    mtu:
      get_param: Storage1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage1InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi1NetworkVlanID
    mtu:
      get_param: InternalApi1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi1InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant1NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant1NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant1NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant1NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant1NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant1IpSubnet
    mtu:
      get_param: Tenant1NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant1InterfaceDefaultRoute

ComputeSriov0Hw4_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage0NetworkVlanID
    mtu:
      get_param: Storage0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage0InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi0NetworkVlanID
    mtu:
      get_param: InternalApi0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi0InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant0NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant0NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant0NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant0NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant0NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant0IpSubnet
    mtu:
      get_param: Tenant0NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant0InterfaceDefaultRoute

ComputeSriov1Hw5_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage1NetworkVlanID
    mtu:
      get_param: Storage1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage1InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi1NetworkVlanID
    mtu:
      get_param: InternalApi1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi1InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: linux_bond
    name: bond0
    use_dhcp: false
    bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
    mtu:
      get_param: Tenant1NetworkMtu
    members:
    - type: interface
      name: nic3
      primary: true
      mtu:
        get_param: Tenant1NetworkMtu
    - type: interface
      name: nic4
      mtu:
        get_param: Tenant1NetworkMtu
  - type: vlan
    vlan_id:
      get_param: Tenant1NetworkVlanID
    device: bond0
  - type: contrail_vrouter
    name: vhost0
    use_dhcp: false
    members:
      -
        type: interface
        name:
          str_replace:
            template: vlanVLANID
            params:
              VLANID: {get_param: Tenant1NetworkVlanID}
        use_dhcp: false
    addresses:
    - ip_netmask:
        get_param: Tenant1IpSubnet
    mtu:
      get_param: Tenant1NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant1InterfaceDefaultRoute

ComputeDpdk0Hw2_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage0NetworkVlanID
    mtu:
      get_param: Storage0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage0InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi0NetworkVlanID
    mtu:
      get_param: InternalApi0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi0InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
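  # For DPDK, the vRouter manages the bond itself: bond_mode 4 is 802.3ad
  # (LACP) and bond_policy selects the layer2+3 transmit hash. cpu_list pins
  # the DPDK forwarding threads to the listed cores, and the driver value is
  # templated from the overcloud settings in site.yml.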
  - type: contrail_vrouter_dpdk
    name: vhost0
    vlan_id:
      get_param: Tenant0NetworkVlanID
    driver: "{{ overcloud['contrail']['vrouter']['dpdk']['driver'] }}"
    bond_mode: 4
    bond_policy: layer2+3
    cpu_list: 1,2
    members:
    - type: interface
      name: nic3
    - type: interface
      name: nic4
    addresses:
    - ip_netmask:
        get_param: Tenant0IpSubnet
    mtu:
      get_param: Tenant0NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant0InterfaceDefaultRoute

ComputeDpdk1Hw3_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage1NetworkVlanID
    mtu:
      get_param: Storage1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage1InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: InternalApi1NetworkVlanID
    mtu:
      get_param: InternalApi1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: InternalApi1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: InternalApiSupernet
      next_hop:
        get_param: InternalApi1InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: contrail_vrouter_dpdk
    name: vhost0
    vlan_id:
      get_param: Tenant1NetworkVlanID
    driver: "{{ overcloud['contrail']['vrouter']['dpdk']['driver'] }}"
    bond_mode: 4
    bond_policy: layer2+3
    cpu_list: 1,2
    members:
    - type: interface
      name: nic3
    - type: interface
      name: nic4
    addresses:
    - ip_netmask:
        get_param: Tenant1IpSubnet
    mtu:
      get_param: Tenant1NetworkMtu
    routes:
    -
      ip_netmask:
        get_param: TenantSupernet
      next_hop:
        get_param: Tenant1InterfaceDefaultRoute

CephStorage0Hw6_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage0NetworkVlanID
    mtu:
      get_param: Storage0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage0InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: StorageMgmt0NetworkVlanID
    mtu:
      get_param: StorageMgmt0NetworkMtu
    addresses:
    - ip_netmask:
        get_param: StorageMgmt0IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageMgmtSupernet
      next_hop:
        get_param: StorageMgmt0InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: interface
    name: nic3
    use_dhcp: false
  - type: interface
    name: nic4
    use_dhcp: false

CephStorage1Hw7_network_config:
  - type: interface
    name: nic1
    dns_servers:
      get_param: DnsServers
    use_dhcp: false
    mtu:
      get_param: ControlPlaneMtu
    addresses:
    - ip_netmask:
        list_join:
          - '/'
          - - get_param: ControlPlaneIp
            - get_param: ControlPlaneSubnetCidr
    routes:
    -
      default: True
      next_hop:
        get_param: ControlPlaneDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: Storage1NetworkVlanID
    mtu:
      get_param: Storage1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: Storage1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageSupernet
      next_hop:
        get_param: Storage1InterfaceDefaultRoute
  - type: vlan
    device: nic1
    vlan_id:
      get_param: StorageMgmt1NetworkVlanID
    mtu:
      get_param: StorageMgmt1NetworkMtu
    addresses:
    - ip_netmask:
        get_param: StorageMgmt1IpSubnet
    routes:
    -
      ip_netmask:
        get_param: StorageMgmtSupernet
      next_hop:
        get_param: StorageMgmt1InterfaceDefaultRoute
  - type: interface
    name: nic2
    use_dhcp: false
  - type: interface
    name: nic3
    use_dhcp: false
  - type: interface
    name: nic4
    use_dhcp: false

Sample compute-nodes.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

# Each list item contains:
#    name: name of a node in the inventory (string)
#    profile: name of hardware profile, group of servers (optional, string)
#    leaf: leaf name (optional, string)
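# Note: leaf and profile together select the matching network_config template
# shown earlier; for example, computedpdk1 (leaf '0', profile hw2) is rendered
# with ComputeDpdk0Hw2_network_config.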

# List of nodes to use as compute role using Contrail DPDK vRouter
compute_nodes_dpdk:
  - name: computedpdk1
    leaf: '0'
    profile: hw2
  - name: computedpdk2
    leaf: '1'
    profile: hw3

# List of nodes to use as compute role using Sriov
compute_nodes_sriov:
  - name: computesriov1
    leaf: '0'
    profile: hw4
  - name: computesriov2
    leaf: '1'
    profile: hw5

# List of nodes to use as compute role using Contrail kernel vRouter
compute_nodes_kernel:
  - name: compute1
    leaf: '0'
    profile: hw0
  - name: compute2
    leaf: '0'
    profile: hw1
  - name: compute3
    leaf: '1'
    profile: hw1
  - name: compute4
    leaf: '1'
    profile: hw0

Sample storage-nodes.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#

# List of nodes to use as storage host role
# List item contains:
#    name: name of a node in the inventory (string)
#    profile: name of hardware profile, group of servers (optional, string)
#    leaf: leaf name (optional, string)

storage_nodes:
  - name: storage1
    leaf: '0'
    profile: hw6
  - name: storage2
    leaf: '0'
    profile: hw6
  - name: storage3
    leaf: '1'
    profile: hw7

Sample k8s-host-nodes.yml Configuration File

# Copyright 2023 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
# +-------------------+   +--------------+    +-----------------+
# |Undercloud/Jumphost|   |OpenStack Ctrl|    |OpenStack Compute|
# +-------------------+   +--------------+    +-----------------+
# |Provision & deploy |   |Neutron       |    |vrouter          |
# +-+-----------------+   |Heat          |    |                 |
#   |                     |Keystone      |    |                 |
#   |                     ++-------------+    |                 |
#   |                      |                  ++--+-------------+
#   |                      |                   |  |
#   |                      |                   |  |
#   |                      |                   |  +----+--------------------------------------tenant_ip_netmask------------+
#   |                      |                   |       |
#   |                      |                   |       |
#   |                      +------------+------+----------------------+-------------+---------internalapi_ip_netmask-------+
#   |                                   |              |              |             |
#   |                                   |              |              |             |
#   |                        +-------------------------------------------------------------+   +-----------+   +-----------+
#   |                        |          |              |              |             |      |   |           |   |           |
#   +--control_ip_netmask----+ +--------+--+   +-------+--+   +-------+--+  +-------+--+   |   |           |   |           |
#                            | |Config     |   |Control   |   |Command   |  |K8s api   |   |   |           |   |           |
#                            | +-----------+   +----------+   +----------+  +-+--------+   |   |           |   |           |
#                            | |Analytics  |                  |WebUI     |    |            |   |           |   |           |
#                            | +-----------+                  +-----+----+    |            |   |           |   |           |
#                            |                                      |         |            |   |           |   |           |
#                            |                                   +--+---------+--+         |   |           |   |           |
#                            |                 K8s host          |  VIP & PROXY  |         |   |  K8s host |   |  K8s host |
#                            +-----------------------------------+---------------+---------+   +-----------+   +-----------+
#                                                                   |         |
#                                                                   |         |
#                                                               +---+---------+---------------external_ip_netmask----------+
#
# Contrail control plane deployed on Kubernetes.
#
# Every node defined in the k8s_host_nodes list should have the following keys defined:
#  * name: hostname without the dns domain. It is later concatenated with the
#          global['domain'] value from site.yml.
#
#  * hypervisor: Name of the control host defined in control-host-nodes.yml
#                where the k8s node VM will be hosted.
#
#  * control_ip_netmask: IP address in CIDR notation on the control network.
#                        This network is used to provision, deploy, and
#                        access the node.
#
#  * internalapi_ip_netmask: IP address in CIDR notation on the internal API
#                            network. This network is used for communication
#                            between OpenStack components such as Neutron and
#                            Heat and the Contrail services. The Contrail
#                            vRouter uses this network to provision itself,
#                            and k8s components use it for internal
#                            communication.
#
#  * tenant_ip_netmask: IP address in CIDR notation on the tenant network.
#                       The vRouter communicates with the Contrail control
#                       plane over this network.
#
#  * external_ip_netmask: IP address in CIDR notation on the external network.
#                         This network is used to set up the external VIP
#                         managed by keepalived. HAProxy is configured to
#                         expose services such as Contrail Command, the
#                         Contrail web UI, and the k8s API through this VIP.
#
#  * extra keys: nodes can define additional variables used later in the
#                network configuration, such as DNS servers and the default
#                gateway.
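# The "common" mapping below is defined once with a YAML anchor (&common) and
# merged into each node entry with the "<<: *common" merge key, so every node
# inherits the same DNS servers and default gateway.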
common: &common
  dns_server1: "8.8.4.4"
  dns_server2: "8.8.8.8"
  gw: "192.2.0.254"

k8s_host_nodes:
  - name: k8s-contrail1
    hypervisor: controler1
    control_ip_netmask: "192.168.213.21/24"
    internalapi_ip_netmask: "172.16.0.21/24"
    tenant_ip_netmask: "172.17.131.21/24"
    external_ip_netmask: "192.2.0.230/25"
    <<: *common
  - name: k8s-contrail2
    hypervisor: controler2
    control_ip_netmask: "192.168.213.22/24"
    internalapi_ip_netmask: "172.16.0.22/24"
    tenant_ip_netmask: "172.17.131.22/24"
    external_ip_netmask: "192.2.0.231/25"
    <<: *common
  - name: k8s-contrail3
    hypervisor: controler3
    control_ip_netmask: "192.168.213.23/24"
    internalapi_ip_netmask: "172.16.0.23/24"
    tenant_ip_netmask: "172.17.131.23/24"
    external_ip_netmask: "192.2.0.232/25"
    <<: *common

# Template for the network layout on all Kubernetes cluster nodes.
# This follows the cloud-init network configuration syntax:
# https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v1.html#network-config-v1
# or
# https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html#network-config-v2
# Variables from k8s_host_nodes can be referenced with "{{ host.<variable> }}".
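# The ipaddr filter used below splits a supernet CIDR into its components;
# for example, "172.16.0.0/16" | ipaddr('network') yields "172.16.0.0" and
# "172.16.0.0/16" | ipaddr('netmask') yields "255.255.0.0" (addresses here
# are illustrative only).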
k8s_host_nodes_network_config:
  version: 1
  config:
  - type: physical
    name: enp1s0
    subnets:
    - type: static
      address: "{{ host.control_ip_netmask }}"
  - type: vlan
    name: "enp1s0.{{ overcloud.network.internal_api.vlan }}"
    vlan_link: enp1s0
    vlan_id: "{{ overcloud.network.internal_api.vlan }}"
    subnets:
    - type: static
      address: "{{ host.internalapi_ip_netmask }}"
      routes:
      - gateway: "{{ overcloud.network.internal_api.gateway }}"
        network: "{{ overcloud.network.internal_api.supernet | ipaddr('network') }}"
        netmask: "{{ overcloud.network.internal_api.supernet | ipaddr('netmask') }}"
  - type: physical
    name: enp2s0
    subnets:
    - type: static
      address: "{{ host.external_ip_netmask }}"
      gateway: "{{ host.gw }}"
      dns_nameservers:
        - "{{ host.dns_server1 }}"
        - "{{ host.dns_server2 }}"
  - type: physical
    name: enp3s0
  - type: vlan
    name: "enp3s0.{{ overcloud.network.tenant.vlan }}"
    vlan_link: enp3s0
    vlan_id: "{{ overcloud.network.tenant.vlan }}"
    subnets:
    - type: static
      address: "{{ host.tenant_ip_netmask }}"
      routes:
      - gateway: "{{ overcloud.network.tenant.gateway }}"
        network: "{{ overcloud.network.tenant.supernet | ipaddr('network') }}"
        netmask: "{{ overcloud.network.tenant.supernet | ipaddr('netmask') }}"

Sample vault-data.yml Configuration File

# This config structure holds information that must be encrypted for privacy.
# If a password is stored in /var/lib/contrail_cloud/config/.vault_password, it is used;
# otherwise the password can be entered interactively.
#
# This file can be edited with the "ansible-vault edit" command
# This file can be re-encrypted with a new password with the "ansible-vault rekey" command
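# For example, assuming the configuration lives under /var/lib/contrail_cloud/config/:
#   ansible-vault edit /var/lib/contrail_cloud/config/vault-data.yml
#   ansible-vault rekey /var/lib/contrail_cloud/config/vault-data.yml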
vault:
  global:
    rhel:
      # Contrail Cloud Activation Key
      satellite:
        #SATELLITE_KEY
        key: "PUT_YOUR_KEY_HERE"
    # User account used for all Contrail Cloud automation
    # This account will be created on:
    #    - jumphost
    #    - control hosts
    #    - all overcloud roles
    #    - appformix controllers
    service_user:
      # Account Name
      name: "contrail"
      # Account Password
      password: "c0ntrail123"
      # Passphrase used to encrypt the ssh key of the service user.
      # If not defined, the ssh private key is not encrypted.
      # ssh_key_passphrase: "c0ntrail123"
  rhvm:
    vm:
      # rhvm user name
      user: "contrail"
      # password for the rhvm vm user
      password: "c0ntrail123"
      # root password for the rhvm VM
      root_password: "c0ntrail123"
      # webui admin password
      admin_password: "c0ntrail123"
      # Passphrase used to encrypt the ssh key of the rhvm user.
      # If not defined, the ssh private key is not encrypted.
      # ssh_key_passphrase: "c0ntrail123"
      vnc:
        # VNC console password for the rhvm VM
        password: "contrail123"
  undercloud:
    #Administrator password - default is randomly generated
    #admin_password: "c0ntrail123"
    vm:
      # undercloud user name
      user: "stack"
      # password for the undercloud vm user
      password: "contrail123"
      # root password for the undercloud VM
      root_password: "contrail123"
      # Passphrase used to encrypt the ssh key of the undercloud user.
      # If not defined, the ssh private key is not encrypted.
      # ssh_key_passphrase: "c0ntrail123"
      vnc:
        # VNC console password for the undercloud VM
        password: "contrail123"
  overcloud:
    #Administrator password
    admin_password: "c0ntrail123"
    # Root password used for local login to overcloud nodes through console
    # root_password: "contrail123"
    contrail:
      rabbitmq:
        # contrail rabbitmq user name
        user: "contrail_rabbitmq"
        # contrail rabbitmq user password
        password: "c0ntrail123"
    registry:
      local_instance:
        openstack:
          username: "contrail"
          password: "c0ntrail123"
        contrail:
          username: "contrail"
          password: "c0ntrail123"
        contrail-insights:
          username: "contrail"
          password: "c0ntrail123"
  control_hosts:
    vm:
      vnc:
        # VNC console password for all control VMs
        password: "contrail123"
  appformix:
    mysql:
      # AppFormix MySQL user account
      user: "appformix"
      # AppFormix MySQL user password
      password: "c0ntrail123"
    rabbitmq:
      # AppFormix RabbitMQ user account
      user: "appformix"
      # AppFormix RabbitMQ user password
      password: "c0ntrail123"
  # Credentials used to connect to an external Ceph cluster
  #ceph_external:
  #  client_key: "CLIENT_KEY"
  #  client_user: "openstack"

  # List of inventory hardware types that can hold hardware-specific properties.
  # You can create similar configurations to allow references from inventory-nodes.yml.
  inventory_nodes:
    # A sample configuration for a hardware type
    hardware1:
      # IPMI user account for Ironic inventory resources
      pm_user: "ADMIN"
      # IPMI user password for Ironic inventory resources
      pm_password: "ADMIN"
    # A sample configuration for a hardware type
    hardware2:
      # IPMI user account for Ironic inventory resources
      pm_user: "admin"
      # IPMI user password for Ironic inventory resources
      pm_password: "admin"
  # User-defined sensitive data can be stored under the 'other' key.
  # Schema validation only checks that a key/value format is used.
  #other:
  #  mykey: myvalue