
StarlingX Networking Sub-project

Team Information

  • Project Lead: Ghada Khalil <ghada.khalil@windriver.com> / Forrest Zhao <forrest.zhao@intel.com>
  • Technical Lead: Matt Peters <Matt.Peters@windriver.com> / Ruijing Guo <ruijing.guo@intel.com>
  • Contributors: Ruijing Guo <ruijing.guo@intel.com>; Matt Peters <Matt.Peters@windriver.com>; Brent Rowsell <Brent.Rowsell@windriver.com>; Ghada Khalil <Ghada.Khalil@windriver.com>; Allain Legacy <Allain.Legacy@windriver.com>; Steven Webster <Steven.Webster@windriver.com>; Joseph Richard <Joseph.Richard@windriver.com>; Teresa Ho <Teresa.Ho@windriver.com>; Patrick Bonnell <Patrick.Bonnell@windriver.com>; Kailun Qin <kailun.qin@intel.com>; Huifeng Le <huifeng.le@intel.com>; Chenjie Xu <chenjie.xu@intel.com>; Forrest Zhao <forrest.zhao@intel.com>

Team Meeting

Team Objective / Priorities

  • Responsible for developing features and addressing bugs related to StarlingX networking
  • Short Term Priorities (2018)
    • Upstream and/or resolve the networking patches carried by StarlingX
    • Implement StarlingX enhancements related to ovs-dpdk (see story board links below)
    • Fix StarlingX networking-related bugs (see launchpad links below)
      • This includes issues with running StarlingX and DPDK in a virtual environment; more info has been requested from the reporter.
  • Long Term Priorities (2019)
    • Containerized VNF support (if not already supported)
    • Implement support for Time Sensitive Networking
    • Integrate with ONAP and ONAP multi-cloud

Tags

All story board stories and launchpad bugs created for this team should use the tag "stx.networking".

Team Work Items

Status

Useful Networking Commands

  • When deploying OVS-DPDK, VMs must be configured to use a flavor with property: hw:mem_page_size=large
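# example only: create a flavor with large pages for OVS-DPDK VMs (flavor name and sizes are illustrative)
openstack flavor create --ram 1024 --disk 1 --vcpus 1 m1.tiny.dpdk
openstack flavor set m1.tiny.dpdk --property hw:mem_page_size=large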
  • Using helm-overrides to enable the qos extension for neutron
# create a yaml file to enable the qos extension for neutron
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     service_plugins:
     - router
     - network_segment_range
     - qos
 plugins:
   ml2_conf:
     ml2:
       extension_drivers:
       - port_security
       - qos
   openvswitch_agent:
     agent:
       extensions:
       - qos
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# in a separate shell, create the qos policy
export OS_CLOUD=openstack_helm
openstack network qos policy create bw-limit
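
# (optional, illustrative) attach a bandwidth-limit rule to the policy and apply it to a port;
# the limit values and the port placeholder below are examples
openstack network qos rule create --type bandwidth-limit --max-kbps 3000 --max-burst-kbits 300 bw-limit
openstack port set --qos-policy bw-limit <port-name-or-id>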
  • Using helm-overrides to enable the trunk extension for neutron
# create a yaml file to enable the trunk extension for neutron
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     service_plugins:
     - router
     - network_segment_range
     - trunk
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# In a separate shell, verify that the Trunk Extension and Trunk port details extensions are enabled
export OS_CLOUD=openstack_helm
openstack extension list --network | grep -i trunk
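
# (optional, illustrative) create a trunk from existing ports; parent-port0 and subport0 are example names
openstack network trunk create --parent-port parent-port0 trunk0
openstack network trunk set --subport port=subport0,segmentation-type=vlan,segmentation-id=100 trunk0
openstack network trunk list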
  • Using Calico global network policy to allow access to a host service
# create GlobalNetworkPolicy for VIM webserver access
kubectl apply -f - <<EOF
apiVersion: crd.projectcalico.org/v1
kind: GlobalNetworkPolicy
metadata:
  name: allow-vim-webserver
spec:
  ingress:
  - action: Allow
    destination:
      ports:
      - 32323
    protocol: TCP
  order: 500
  selector: has(iftype) && iftype == 'oam'
  types:
  - Ingress
EOF
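# verify the policy was created (assumes kubectl access to the Calico CRDs)
kubectl get globalnetworkpolicies.crd.projectcalico.org
kubectl describe globalnetworkpolicy allow-vim-webserver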
  • Using helm-overrides to add configuration rpc_response_max_timeout in neutron.conf
# The maximum RPC timeout is configurable via rpc_response_max_timeout in the Neutron config, instead of being calculated as 10 * rpc_response_timeout. If the maximum is too large, requests that should fail are held for a long time before the server returns a failure. If it is too small and the server is very busy, requests that need more time than the maximum will fail even though they would succeed with a larger value.

# create a yaml file to add configuration rpc_response_max_timeout in neutron.conf
cat > neutron-overrides.yaml <<EOF
conf:
 neutron:
   DEFAULT:
     rpc_response_max_timeout: 600
EOF

# update the neutron overrides and apply to stx-openstack
source /etc/platform/openrc
system helm-override-update stx-openstack neutron openstack --values neutron-overrides.yaml
system application-apply stx-openstack

# verify that rpc_response_max_timeout has been set in neutron.conf
kubectl get pod -n openstack | grep neutron-server
NEUTRON_POD=$(kubectl get pod -n openstack | awk '/neutron-server/{print $1; exit}')
kubectl exec -it ${NEUTRON_POD} -n openstack -- bash
# then, inside the pod:
grep rpc_response_max_timeout /etc/neutron/neutron.conf
  • Configure SR-IOV with OpenStack
# Configure SR-IOV on your interface (such as enp65s0f0)
export COMPUTE=controller-0
PHYSNET0='physnet0'
system host-lock ${COMPUTE}
system datanetwork-add ${PHYSNET0} vlan
system host-if-list -a ${COMPUTE}
# set DATA0IFUUID to the UUID of the SR-IOV interface (enp65s0f0 in this example) from the list above
DATA0IFUUID=$(system host-if-list -a ${COMPUTE} | awk '/enp65s0f0/{print $2}')
system host-if-modify -m 1500 -n sriov -c pci-sriov -N 5 ${COMPUTE} ${DATA0IFUUID}
system interface-datanetwork-assign ${COMPUTE} ${DATA0IFUUID} ${PHYSNET0}
system interface-datanetwork-list ${COMPUTE}
system host-unlock ${COMPUTE}
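# (optional) after the host unlocks, confirm the VFs exist on the host (interface name from the example above)
cat /sys/class/net/enp65s0f0/device/sriov_numvfs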
# Create an instance on SR-IOV interface (make sure stx-openstack has been re-applied successfully)
system application-list
ADMINID=`openstack project list | grep admin | awk '{print $2}'`
PHYSNET0='physnet0'
PUBLICNET='public-net0'
PUBLICSUBNET='public-subnet0'
openstack network segment range create ${PHYSNET0}-a --network-type vlan --physical-network ${PHYSNET0} --minimum 400 --maximum 499 --private --project ${ADMINID}
openstack network create --project ${ADMINID} --provider-network-type=vlan --provider-physical-network=${PHYSNET0} --provider-segment=400 ${PUBLICNET}
openstack subnet create --project ${ADMINID} ${PUBLICSUBNET} --network ${PUBLICNET} --subnet-range 192.168.101.0/24
openstack image create --container-format bare --disk-format qcow2 --file cirros-0.3.4-x86_64-disk.img cirros
openstack image list
net_id=`neutron net-show ${PUBLICNET} | grep "\ id\ " | awk '{ print $4 }'`
port_id=`neutron port-create $net_id --name sriov_port --binding:vnic_type direct | grep "\ id\ " | awk '{ print $4 }'`
openstack server create --flavor m1.tiny --image cirros --nic port-id=$port_id test-sriov
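# verify the instance boots and the SR-IOV port is bound (names from the steps above)
openstack server list
openstack port show sriov_port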