
Revision as of 02:18, 4 September 2019

StarlingX Networking Sub-project

Team Information

  • Project Lead: Ghada Khalil <ghada.khalil@windriver.com> / Forrest Zhao <forrest.zhao@intel.com>
  • Technical Lead: Matt Peters <Matt.Peters@windriver.com> / Ruijing Guo <ruijing.guo@intel.com>
  • Contributors: Ruijing Guo <ruijing.guo@intel.com>; Matt Peters <Matt.Peters@windriver.com>; Brent Rowsell <Brent.Rowsell@windriver.com>; Ghada Khalil <Ghada.Khalil@windriver.com>; Allain Legacy <Allain.Legacy@windriver.com>; Steven Webster <Steven.Webster@windriver.com>; Joseph Richard <Joseph.Richard@windriver.com>; Teresa Ho <Teresa.Ho@windriver.com>; Patrick Bonnell <Patrick.Bonnell@windriver.com>; Kailun Qin <kailun.qin@intel.com>; Huifeng Le <huifeng.le@intel.com>; Chenjie Xu <chenjie.xu@intel.com>; Forrest Zhao <forrest.zhao@intel.com>

Team Meeting

Team Objective / Priorities

  • Responsible for developing features and addressing bugs related to StarlingX networking
  • Short Term Priorities (2018)
    • Upstream and/or resolve the networking patches carried by StarlingX
    • Implement StarlingX enhancements related to ovs-dpdk (see story board links below)
    • Fix StarlingX networking-related bugs (see launchpad links below)
      • This includes issues with running StarlingX and DPDK in a virtual environment; more info has been requested from the reporter.
  • Long Term Priorities (2019)
    • Containerized VNF support (if not already supported)
    • Implement support for Time Sensitive Networking
    • Integrate with ONAP and ONAP multi-cloud

Tags

All story board stories and launchpad bugs created for this team should use the tag "stx.networking".

Team Work Items

Status

Useful Networking Commands

  • When deploying OVS-DPDK, VMs must be configured to use a flavor with the property hw:mem_page_size=large (see the example below)
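A minimal sketch of setting that property on a flavor; the flavor name m1.tiny.dpdk is assumed for illustration:
openstack flavor create --ram 1024 --disk 10 --vcpus 1 m1.tiny.dpdk
openstack flavor set --property hw:mem_page_size=large m1.tiny.dpdk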
  • Configuring Networking Features
    • Consult the configuration section of the OpenStack networking guide (https://docs.openstack.org/neutron/stein/admin/index.html) for how to configure the different features supported by neutron.
    • For StarlingX, the configuration must be specified using helm-overrides; a direct change to the neutron.conf file is not supported. See the examples below.
    • Useful references: the StarlingX Armada manifest (https://github.com/openstack/stx-config/blob/24da9f962365116936948ff5b05fc7f3cfc49602/kubernetes/applications/stx-openstack/stx-openstack-helm/stx-openstack-helm/manifests/manifest.yaml#L992) and the openstack-helm neutron values (https://github.com/openstack/openstack-helm/blob/master/neutron/values.yaml).
    • For more information on helm-overrides in StarlingX, consult the StarlingX Containers FAQ.
  • Using helm-overrides to enable the qos extension for neutron
# create a yaml file to enable the qos extension for neutron
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     service_plugins:
     - router
     - network_segment_range
     - qos
 plugins:
   ml2_conf:
     ml2:
       extension_drivers:
       - port_security
       - qos
   openvswitch_agent:
     agent:
       extensions:
       - qos
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# in a separate shell, create the qos policy
export OS_CLOUD=openstack_helm
openstack network qos policy create bw-limit
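As a follow-on illustration (not part of the original steps), a bandwidth-limit rule can be added to the policy and the policy attached to a port; ${PORT_ID} is a placeholder:
openstack network qos rule create --type bandwidth-limit --max-kbps 3000 --max-burst-kbits 300 bw-limit
openstack port set --qos-policy bw-limit ${PORT_ID}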
  • Using helm-overrides to enable the trunk extension for neutron
# create a yaml file to enable the trunk extension for neutron
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     service_plugins:
     - router
     - network_segment_range
     - trunk
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# In a separate shell, verify that the Trunk Extension and Trunk port details extensions are enabled
export OS_CLOUD=openstack_helm
openstack extension list --network | grep -i trunk
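For illustration, a trunk can then be built on an existing parent port and a subport added; the port and trunk names below are placeholders:
openstack network trunk create --parent-port parent-port0 trunk0
openstack network trunk set --subport port=sub-port0,segmentation-type=vlan,segmentation-id=100 trunk0
openstack network trunk show trunk0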
  • Using helm-overrides to enable internal dns
# create a yaml file to enable internal dns resolution for neutron
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     dns_domain: example.ca
 plugins:
   ml2_conf:
     ml2:
       extension_drivers:
       - port_security
       - dns
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack
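To spot-check the result, a port can be created with a DNS name and its dns_assignment inspected; the network and port names below are illustrative placeholders:
export OS_CLOUD=openstack_helm
openstack port create --network public-net0 --dns-name vm1 dns-test-port
openstack port show dns-test-port -c dns_assignment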
  • Using helm-overrides to add configuration rpc_response_max_timeout in neutron.conf
# The maximum RPC timeout is now configurable with rpc_response_max_timeout in the Neutron config, instead of being calculated as 10 * rpc_response_timeout. If this value is too large, requests that should fail are held for a long time before the server returns a failure; if it is too small and the server is very busy, requests may need longer than the maximum RPC timeout and will fail even though they would succeed with a larger value.

# create a yaml file to add configuration rpc_response_max_timeout in neutron.conf
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     rpc_response_max_timeout: 600
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# verify that the rpc_response_max_timeout configuration has been added to neutron.conf
kubectl get pod -n openstack | grep neutron-server
kubectl exec -it <neutron-server-pod-name> -n openstack -- bash
cat /etc/neutron/neutron.conf | grep rpc_response_max_timeout
  • Using Calico global network policy to allow access to a host service
# create GlobalNetworkPolicy for VIM webserver access
kubectl apply -f - <<EOF
apiVersion: crd.projectcalico.org/v1
kind: GlobalNetworkPolicy
metadata:
  name: allow-vim-webserver
spec:
  ingress:
  - action: Allow
    destination:
      ports:
      - 32323
    protocol: TCP
  order: 500
  selector: has(iftype) && iftype == 'oam'
  types:
  - Ingress
EOF
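The applied policy can be inspected through the same Calico CRD, for example:
kubectl get globalnetworkpolicies.crd.projectcalico.org allow-vim-webserver -o yaml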
  • Configure SR-IOV with OpenStack
# Configure SR-IOV on your interface (such as enp65s0f0)
export COMPUTE=controller-0
PHYSNET0='physnet0'
system host-lock ${COMPUTE}
system datanetwork-add ${PHYSNET0} vlan
system host-if-list -a ${COMPUTE}
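# ${DATA0IFUUID} used below is the UUID of the SR-IOV interface (enp65s0f0 in this example);
# it can be captured from the host-if-list output, e.g. (field position assumed):
DATA0IFUUID=$(system host-if-list -a ${COMPUTE} | awk '/enp65s0f0/ {print $2}')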
system host-if-modify -m 1500 -n sriov -c pci-sriov -N 5 ${COMPUTE} ${DATA0IFUUID}
system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
system interface-datanetwork-list ${COMPUTE}
system host-unlock ${COMPUTE}

# Create an instance on an SR-IOV interface (make sure stx-openstack has been re-applied successfully)
system application-list
ADMINID=`openstack project list | grep admin | awk '{print $2}'`
PHYSNET0='physnet0'
PUBLICNET='public-net0'
PUBLICSUBNET='public-subnet0'
openstack network segment range create ${PHYSNET0}-a --network-type vlan --physical-network ${PHYSNET0}  --minimum 400 --maximum 499 --private --project ${ADMINID}
openstack network create --project ${ADMINID} --provider-network-type=vlan --provider-physical-network=${PHYSNET0} --provider-segment=400 ${PUBLICNET}
openstack subnet create --project ${ADMINID} ${PUBLICSUBNET} --network ${PUBLICNET} --subnet-range 192.168.101.0/24
openstack image create --container-format bare --disk-format qcow2 --file cirros-0.3.4-x86_64-disk.img cirros
openstack image list
net_id=`neutron net-show ${PUBLICNET} | grep "\ id\ " | awk '{ print $4 }'`
port_id=`neutron port-create $net_id --name sriov_port --binding:vnic_type direct | grep "\ id\ " | awk '{ print $4 }'`
openstack server create --flavor m1.tiny --image cirros --nic port-id=$port_id test-sriov
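The deprecated neutron client calls above can also be written with the unified openstack client; an equivalent sketch:
net_id=$(openstack network show ${PUBLICNET} -f value -c id)
port_id=$(openstack port create --network ${net_id} --vnic-type direct -f value -c id sriov_port)
openstack server create --flavor m1.tiny --image cirros --nic port-id=${port_id} test-sriov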
  • Configure the flat network.
system host-lock ${COMPUTE} 

# If the interface is already bound to another datanetwork, remove the binding first (${PHYSNETDATA0UUID} below is the UUID of the assignment shown by interface-datanetwork-list).
system interface-datanetwork-list ${COMPUTE}
system interface-datanetwork-remove ${PHYSNETDATA0UUID}

# Create a flat datanetwork and bind it to the interface.
system datanetwork-add phy-flat flat 
system host-if-modify -m 1500 -n data0 -c data ${COMPUTE} ${DATA0IFUUID} 
system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} phy-flat 
system host-unlock ${COMPUTE}

# Check that the application has been re-applied successfully.
system application-list 

# Check that pods are initialized correctly
kubectl -n openstack get pod | grep -v Running | grep -v Complete

# Create a flat network connected to the phy-flat datanetwork.
export OS_CLOUD=openstack_helm
ADMINID=`openstack project list | grep admin | awk '{print $2}'`
openstack network create --project ${ADMINID} --provider-network-type=flat --provider-physical-network=phy-flat netflat 
openstack subnet create --project ${ADMINID} netflat-subnet --network netflat --subnet-range 192.168.103.0/24 

# Create a server and, if needed, ping it to check that the network is set up correctly.
openstack server create --image cirros --flavor m1.tiny --network netflat vm1
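A quick check that the instance came up and received an address on the flat network (illustrative, not from the original steps):
openstack server list
openstack server show vm1 -c addresses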
  • Pass through a physical NIC to a VM by binding a port with vnic_type direct-physical to the VM.
# This method should be used only for NICs that support SR-IOV. If the file "/sys/class/net/$IF_NAME/device/sriov_numvfs" exists, the NIC supports SR-IOV. For NICs that do not support SR-IOV, such as the i210, use the "Pass through a physical NIC to a VM by using PCI PASSTHROUGH" method below, with device_type set to "type-PCI".
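# For example, a quick check on an interface assumed to be named enp65s0f0:
  ls /sys/class/net/enp65s0f0/device/sriov_numvfs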

# Use the system commands to configure the interfaces: one is used for PCI passthrough and the other is a normal data interface.
  export COMPUTE=controller-0
  PHYSNET0='physnet0'
  PHYSNET1='physnet1'
  system host-lock ${COMPUTE}
  system datanetwork-add ${PHYSNET0} vlan
  system datanetwork-add ${PHYSNET1} vlan
  system host-if-list -a ${COMPUTE}
  system host-if-modify -m 1500 -n pcipass -c pci-passthrough ${COMPUTE} ${DATA0IFUUID}
  system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
  system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
  system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
  system interface-datanetwork-list ${COMPUTE}
  system host-unlock ${COMPUTE}
  # make sure stx-openstack has been re-applied successfully
  system application-list

# Create keypair and security group
  mkdir -p /home/sysadmin/.ssh/
  vi /home/sysadmin/.ssh/id_rsa
  openstack keypair create key1 --private-key /home/sysadmin/.ssh/id_rsa
  openstack security group create security1
  openstack security group rule create --ingress --protocol icmp --remote-ip 0.0.0.0/0 security1
  openstack security group rule create --ingress --protocol tcp --remote-ip 0.0.0.0/0 security1
  openstack security group rule create --ingress --protocol udp --remote-ip 0.0.0.0/0 security1

# Create networks and subnets. Upload the ubuntu image.
  export OS_CLOUD=openstack_helm
  ADMINID=`openstack project list | grep admin | awk '{print $2}'`
  PHYSNET0='physnet0'
  PHYSNET1='physnet1'
  PUBLICNET0='public-net0'
  PUBLICNET1='public-net1'
  PUBLICSUBNET0='public-subnet0'
  PUBLICSUBNET1='public-subnet1'
  openstack network segment range create ${PHYSNET0}-a --network-type vlan --physical-network ${PHYSNET0} --minimum 400 --maximum 499 --private --project ${ADMINID}
  openstack network segment range create ${PHYSNET1}-a --network-type vlan --physical-network ${PHYSNET1} --minimum 500 --maximum 599 --private --project ${ADMINID}
  openstack network create --project ${ADMINID} --provider-network-type=vlan --provider-physical-network=${PHYSNET0} --provider-segment=400 ${PUBLICNET0}
  openstack network create --project ${ADMINID} --provider-network-type=vlan --provider-physical-network=${PHYSNET1} --provider-segment=500 ${PUBLICNET1}
  openstack subnet create --project ${ADMINID} ${PUBLICSUBNET0} --network ${PUBLICNET0} --subnet-range 192.168.101.0/24
  openstack subnet create --project ${ADMINID} ${PUBLICSUBNET1} --network ${PUBLICNET1} --subnet-range 192.168.102.0/24
  wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
  openstack image create --container-format bare --disk-format qcow2 --file xenial-server-cloudimg-amd64-disk1.img ubuntu
  openstack image list

# Create PF port whose vnic_type is direct-physical
  net_id=`neutron net-show ${PUBLICNET0} | grep "\ id\ " | awk '{ print $4 }'`
  port_id=`neutron port-create $net_id --name pf-port --binding:vnic_type direct-physical | grep "\ id\ " | awk '{ print $4 }'`

# Create a VM with one PF port and one normal port, which is used to ssh to the VM
  openstack server create --image ubuntu --flavor m1.small --nic port-id=$port_id --network ${PUBLICNET1} --security-group security1 --key-name key1 test-pci
  • Pass through a physical NIC to a VM by using PCI PASSTHROUGH.
 # Use the system commands to configure the interfaces: one is used for PCI passthrough and the other is a normal data interface.
   export COMPUTE=controller-0
   PHYSNET0='physnet0'
   PHYSNET1='physnet1'
   system host-lock ${COMPUTE}
   system datanetwork-add ${PHYSNET0} vlan
   system datanetwork-add ${PHYSNET1} vlan
   system host-if-list -a ${COMPUTE}
   system host-if-modify -m 1500 -n pcipass -c pci-passthrough ${COMPUTE} ${DATA0IFUUID}
   system host-if-modify -m 1500 -n data1 -c data ${COMPUTE} ${DATA1IFUUID}
   system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
   system interface-datanetwork-assign ${COMPUTE} ${DATA1IFUUID} ${PHYSNET1}
   system interface-datanetwork-list ${COMPUTE}

 # create a yaml file to configure pci alias in nova.conf. 
cat > nova-overrides.yaml <<EOF
conf:
  nova:
    DEFAULT:
      debug: True
    pci:
      alias:
        type: multistring
        values:
        - '{"vendor_id": "8086", "product_id": "37d2", "device_type": "type-PF", "name": "intel-X722-pf"}'
EOF

# You can retrieve the vendor_id and product_id with the command "lspci -nn | grep -i eth". For example:
  lspci -nn | grep -i eth
  41:00.0 Ethernet controller [0200]: Intel Corporation Ethernet Connection X722 for 10GBASE-T [8086:37d2] (rev 04)
  Here 8086 is the vendor_id and 37d2 is the product_id.
  device_type can be one of three values: type-PCI, type-PF and type-VF.
  type-PCI: for NICs that do not support SR-IOV, such as the i210.
  type-PF:  for NICs that support SR-IOV; the PF itself is passed through and controlled by the VM. This is sometimes useful in NFV use cases.
  type-VF:  for NICs that support SR-IOV; individual VFs are passed through to the VMs.

# update the nova overrides and apply to stx-openstack
  system helm-override-update stx-openstack nova openstack --values nova-overrides.yaml

# Unlock host and make sure stx-openstack has been re-applied successfully
  system host-unlock ${COMPUTE}
  system application-list

# Create keypair and security group
  mkdir -p /home/sysadmin/.ssh/
  vi /home/sysadmin/.ssh/id_rsa
  openstack keypair create key1 --private-key /home/sysadmin/.ssh/id_rsa
  openstack security group create security1
  openstack security group rule create --ingress --protocol icmp --remote-ip 0.0.0.0/0 security1
  openstack security group rule create --ingress --protocol tcp --remote-ip 0.0.0.0/0 security1
  openstack security group rule create --ingress --protocol udp --remote-ip 0.0.0.0/0 security1

# Create a flavor and set the property "pci_passthrough:alias". When a VM is created with a flavor carrying this property, Nova passes a physical device matching the alias through to the VM.
  openstack flavor create --ram 4096 --disk 100 --vcpus 2 m1.medium.pci_passthrough
  openstack flavor set --property "pci_passthrough:alias"="intel-X722-pf:1" m1.medium.pci_passthrough

# Create networks and subnets. Upload the ubuntu image.
  export OS_CLOUD=openstack_helm
  ADMINID=`openstack project list | grep admin | awk '{print $2}'`
  PHYSNET1='physnet1'
  PUBLICNET1='public-net1'
  PUBLICSUBNET1='public-subnet1'
  openstack network segment range create ${PHYSNET1}-a --network-type vlan --physical-network ${PHYSNET1} --minimum 500 --maximum 599 --private --project ${ADMINID}
  openstack network create --project ${ADMINID} --provider-network-type=vlan --provider-physical-network=${PHYSNET1} --provider-segment=500 ${PUBLICNET1}
  openstack subnet create --project ${ADMINID} ${PUBLICSUBNET1} --network ${PUBLICNET1} --subnet-range 192.168.102.0/24
  wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
  openstack image create --container-format bare --disk-format qcow2 --file xenial-server-cloudimg-amd64-disk1.img ubuntu
  openstack image list

# Create the VM with the following command:
  openstack server create --image ubuntu --flavor m1.medium.pci_passthrough --network ${PUBLICNET1} --security-group security1 --key-name key1 test-pci
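# Once the VM is active, the passed-through NIC should be visible inside the guest; a quick check (illustrative, not from the original steps):
  openstack server show test-pci -c status
  # inside the guest: lspci -nn | grep -i eth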

Deploying and Running TSN application in STX Virtual Machine Workload Mode

Reference page: Deploying and Running TSN Application