Saturday, November 28, 2015

Nova and Neutron work-flow && CLI for HAProxy/Keepalived 3 Node Controller RDO Liberty

A more accurate title for this post would be "Nova and Neutron workflow && CLI for HAProxy/Keepalived 3 Node Controller RDO Liberty in an appropriate amount of detail". It follows up on http://lxer.com/module/newswire/view/222164/index.html . The entire environment has been built via the Nova and Neutron CLIs (no Horizon involvement).
The Neutron workflow on the controller is described, including the OVS flow rules on the external bridge created by the flat external network provider, with the external interface eth0 as an OVS port of bridge br-eth0, and the corresponding flows on br-int.

First, create keystonerc_admin to enable managing the cloud as admin via the CLI:

[root@hacontroller1 ~(keystone_admin)]# cat keystonerc_admin
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PROJECT_NAME=admin
export OS_REGION_NAME=regionOne
export OS_PASSWORD=keystonetest
export OS_AUTH_URL=http://controller-vip.example.com:35357/v2.0/
export OS_SERVICE_ENDPOINT=http://controller-vip.example.com:35357/v2.0
export OS_SERVICE_TOKEN=$(cat /root/keystone_service_token)
export PS1='[\u@\h \W(keystone_admin)]\$ '

[root@hacontroller1 ~(keystone_admin)]# cat keystonerc_demo
export OS_USERNAME=demo
export OS_TENANT_NAME=demo
export OS_PROJECT_NAME=demo
export OS_REGION_NAME=regionOne
export OS_PASSWORD=redhat
export OS_AUTH_URL=http://controller-vip.example.com:5000/v2.0/
export PS1='[\u@\h \W(keystone_demo)]\$ '
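
To operate as admin or demo, source the corresponding file before issuing CLI commands. A quick sanity check might look like this (a hedged example; any read-only keystone command will do):

[root@hacontroller1 ~]# . keystonerc_admin
[root@hacontroller1 ~(keystone_admin)]# keystone token-get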

 
[root@hacontroller1 ~(keystone_admin)]#  keystone tenant-list
+----------------------------------+----------+---------+
|                id                |   name   | enabled |
+----------------------------------+----------+---------+
| b2be742697534c3188bdc5ec56038853 |  admin   |   True  |
| efe017b919c1487bab8c58281fcaceeb |   demo   |   True  |
| 4cd322b30ca947eeb86c0a883e549a27 | services |   True  |
+----------------------------------+----------+---------+

****************************************************
Creating HA Neutron router belonging to tenant demo
****************************************************

[root@hacontroller1 ~(keystone_admin)]# neutron router-create --ha True \
--tenant-id efe017b919c1487bab8c58281fcaceeb RouterDMS

[root@hacontroller1 ~(keystone_admin)]# neutron l3-agent-list-hosting-router RouterDMS
+--------------------------------------+---------------------------+----------------+-------+----------+
| id                                   | host                      | admin_state_up | alive | ha_state |
+--------------------------------------+---------------------------+----------------+-------+----------+
| 9c83e688-e7b4-4101-97df-844510d0ee52 | hacontroller1.example.com | True           | :-)   | active   |
| a7bdf03e-4550-4f1b-ae6f-25744894086d | hacontroller2.example.com | True           | :-)   | standby  |
+--------------------------------------+---------------------------+----------------+-------+----------+
 
[root@hacontroller1 ~(keystone_admin)]# neutron router-port-list RouterDMS
+--------------------------------------+-------------------------------------------------+-------------------+--------------------------------------------------------------------------------------+
| id                                   | name                                            | mac_address       | fixed_ips                                                                            |
+--------------------------------------+-------------------------------------------------+-------------------+--------------------------------------------------------------------------------------+
| 013404f6-0bb8-4885-ab64-19e3970d7046 | HA port tenant efe017b919c1487bab8c58281fcaceeb | fa:16:3e:d5:7e:6f | {"subnet_id": "6886d46c-4947-455d-8656-ff0f2a649632", "ip_address": "169.254.192.2"} |
| 4a6bcdf6-7895-45b7-b32b-c63d6e1c6bc5 | HA port tenant efe017b919c1487bab8c58281fcaceeb | fa:16:3e:35:f7:73 | {"subnet_id": "6886d46c-4947-455d-8656-ff0f2a649632", "ip_address": "169.254.192.1"} |
| a37e106e-70a1-47bc-b8de-1153c8cacd2a |                                                 | fa:16:3e:0e:4b:eb | {"subnet_id": "a2c617b1-17cc-4768-b213-9f0795d07b40", "ip_address": "10.10.10.100"}  |
| dd6507fd-73e3-45f6-a935-8bbf29dacbb9 |                                                 | fa:16:3e:26:55:06 | {"subnet_id": "1c47d964-d7ec-4a72-a5a7-bc390c96359d", "ip_address": "30.0.0.1"}      |
+--------------------------------------+-------------------------------------------------+-------------------+--------------------------------------------------------------------------------------+
 
**************************************
Creating private network as demo
**************************************

[root@hacontroller2 ~(keystone_demo)]#  neutron net-create private
[root@hacontroller2 ~(keystone_demo)]#  neutron subnet-create private \
30.0.0.0/24 --dns_nameservers list=true 83.221.202.254

**************************************
Creating public  network as admin
**************************************

[root@hacontroller1 ~(keystone_admin)]# neutron net-create public --shared \
--provider:network_type flat --provider:physical_network physnet1 --router:external

[root@hacontroller1 ~(keystone_admin)]# neutron subnet-create --gateway 10.10.10.1 \
 --allocation-pool start=10.10.10.100,end=10.10.10.150 --disable-dhcp \
--name public_subnet public 10.10.10.0/24
 
[root@hacontroller1 neutron(keystone_demo)]# cat l3_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
send_arp_for_ha = 3
metadata_ip = controller-vip.example.com
external_network_bridge = 
[AGENT]
 
[root@hacontroller1 ml2(keystone_admin)]# cat ml2_conf.ini|grep -v ^#|grep -v ^$
[ml2]
type_drivers = local,gre,flat,vxlan,vlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = *
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 10:10000
[ml2_type_vxlan]
vni_ranges = 10:10000
vxlan_group = 224.0.0.1
[ml2_type_geneve]
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
 
 

[root@hacontroller1 ml2(keystone_admin)]# cat openvswitch_agent.ini | grep -v ^#|grep -v ^$
[ovs]
local_ip = 192.169.142.221
enable_tunneling = True
integration_bridge = br-int
tunnel_bridge = br-tun
bridge_mappings = physnet1:br-eth0
network_vlan_ranges = physnet1
[agent]
tunnel_types = vxlan
vxlan_udp_port = 4789
l2_population = False
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

When "external_network_bridge = " , Neutron places the external 
interface of the router into the OVS bridge specified by the 
"provider_network" provider attribute in the Neutron network. Traffic is
processed by Open vSwitch flow rules. In this configuration it is 
possible to utilize flat and VLAN provider networks.
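
The bridge itself comes from the bridge_mappings entry (physnet1:br-eth0) in openvswitch_agent.ini above. A minimal sketch of creating that wiring by hand, assuming eth0 is the NIC attached to the external network (the ifcfg-file based variant appears later in this post):

# ovs-vsctl add-br br-eth0
# ovs-vsctl add-port br-eth0 eth0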
 

[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl show br-eth0
OFPT_FEATURES_REPLY (xid=0x2): dpid:00003e31a75b624a
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
 1(eth0): addr:52:54:00:41:74:39
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 2(phy-br-eth0): addr:de:0e:37:e4:28:49
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 LOCAL(br-eth0): addr:3e:31:a7:5b:62:4a
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0
[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl dump-flows  br-eth0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=6785.707s, table=0, n_packets=18476, n_bytes=1202867, idle_age=3, priority=4,in_port=2,dl_vlan=3 actions=strip_vlan,NORMAL <==== VLAN tag is stripped
 cookie=0x0, duration=6977.001s, table=0, n_packets=13639, n_bytes=766402, idle_age=1, priority=2,in_port=2 actions=drop
 cookie=0x0, duration=6977.041s, table=0, n_packets=11557, n_bytes=10607506, idle_age=1, priority=0 actions=NORMAL
 
[root@hacontroller1 ~(keystone_admin)]# ovs-vsctl show
eae701a9-447e-4b75-98b5-4f7ce026ddbb
    Bridge br-tun
        fail_mode: secure
        Port "vxlan-c0a98ee0"
            Interface "vxlan-c0a98ee0"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="192.169.142.221", out_key=flow, remote_ip="192.169.142.224"}
        Port br-tun
            Interface br-tun
                type: internal
        Port "vxlan-c0a98ede"
            Interface "vxlan-c0a98ede"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="192.169.142.221", out_key=flow, remote_ip="192.169.142.222"}
        Port "vxlan-c0a98edf"
            Interface "vxlan-c0a98edf"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="192.169.142.221", out_key=flow, remote_ip="192.169.142.223"}
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    Bridge "br-eth0"
        Port "br-eth0"
            Interface "br-eth0"
                type: internal
        Port "eth0"
            Interface "eth0"               <=============
        Port "phy-br-eth0"
            Interface "phy-br-eth0"
                type: patch
                options: {peer="int-br-eth0"}
    Bridge br-int
        fail_mode: secure
        Port br-int
            Interface br-int
                type: internal
        Port "ha-013404f6-0b"
            tag: 2
            Interface "ha-013404f6-0b"
                type: internal
        Port "int-br-eth0"
            Interface "int-br-eth0"
                type: patch
                options: {peer="phy-br-eth0"}
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-dd6507fd-73"
            tag: 1
            Interface "qr-dd6507fd-73"
                type: internal
        Port "qg-a37e106e-70"             ===============>
            tag: 3
            Interface "qg-a37e106e-70"
                type: internal
        Port "tap7e8e240c-aa"
            tag: 1
            Interface "tap7e8e240c-aa"
                type: internal
    ovs_version: "2.4.0" 
 

The packet exits via the qg-a37e106e-70 interface (the outgoing interface of the
corresponding qrouter namespace, attached to br-int due to the external network provider's involvement),
where it is assigned the VLAN tag 3 associated with the external network.
The packet is delivered to the external bridge, where a flow rule strips VLAN tag 3.
The packet is then sent out the physical interface associated with the bridge.
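
The same path can be double-checked with the OVS flow tracer. A hedged example, using the port numbers from the `ovs-ofctl show br-eth0` output above:

# ovs-appctl ofproto/trace br-eth0 in_port=2,dl_vlan=3

The trace output should show the priority=4 rule matching and strip_vlan firing before the packet is forwarded out eth0.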
 

**************************************
Now check OVS flow at br-int
**************************************

[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl show  br-int

OFPT_FEATURES_REPLY (xid=0x2): dpid:0000e6b4e9fe1044
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst

 1(int-br-eth0): addr:d2:35:eb:fc:ba:27
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 2(patch-tun): addr:1e:95:23:46:9e:a4
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 4(tap73cbf393-29): addr:00:00:00:00:00:00
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
 5(ha-013404f6-0b): addr:00:00:00:00:00:00
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
 6(qr-dd6507fd-73): addr:00:00:00:00:00:00
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
 7(qg-a37e106e-70): addr:00:00:00:00:00:00
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
 LOCAL(br-int): addr:e6:b4:e9:fe:10:44
     config:     PORT_DOWN
     state:      LINK_DOWN
     speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0

[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl dump-flows  br-int
NXST_FLOW reply (xid=0x4):
 cookie=0x87b766cda5a1f568, duration=5899.822s, table=0, n_packets=14899, n_bytes=18740788, idle_age=1334, priority=3,in_port=1,vlan_tci=0x0000 actions=mod_vlan_vid:3,NORMAL

 cookie=0x87b766cda5a1f568, duration=6141.616s, table=0, n_packets=0, n_bytes=0, idle_age=6141, priority=2,in_port=1 actions=drop
 cookie=0x87b766cda5a1f568, duration=5900.707s, table=0, n_packets=5, n_bytes=390, idle_age=5892, priority=2,in_port=4 actions=drop
 cookie=0x87b766cda5a1f568, duration=6141.675s, table=0, n_packets=59875, n_bytes=21916362, idle_age=0, priority=0 actions=NORMAL
 cookie=0x87b766cda5a1f568, duration=6141.671s, table=23, n_packets=0, n_bytes=0, idle_age=6141, priority=0 actions=drop
 cookie=0x87b766cda5a1f568, duration=6141.667s, table=24, n_packets=0, n_bytes=0, idle_age=6141, priority=0 actions=drop

************************************************************************
Notice that int-br-eth0 and phy-br-eth0 are a patch port pair (type: patch
in the `ovs-vsctl show` output above) connecting br-int and br-eth0
************************************************************************

[root@hacontroller1 ~(keystone_admin)]# ip netns
qrouter-afe13460-e106-4a0a-abf5-a618f97de6b9
qdhcp-847e5c9c-ce9f-4b2c-86fb-d7597017e8e3

[root@hacontroller1 ~(keystone_admin)]# ip netns exec qrouter-afe13460-e106-4a0a-abf5-a618f97de6b9  ip route
default via 10.10.10.1 dev qg-a37e106e-70
10.10.10.0/24 dev qg-a37e106e-70 proto kernel scope link src 10.10.10.100
30.0.0.0/24 dev qr-dd6507fd-73 proto kernel scope link src 30.0.0.1
169.254.0.0/24 dev ha-013404f6-0b proto kernel scope link src 169.254.0.1
169.254.192.0/18 dev ha-013404f6-0b proto kernel scope link src 169.254.192.2
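
The DNAT/SNAT iptables rules that map floating IPs onto tenant addresses live in the same namespace and can be inspected directly. A minimal sketch:

[root@hacontroller1 ~(keystone_admin)]# ip netns exec qrouter-afe13460-e106-4a0a-abf5-a618f97de6b9 iptables -t nat -S | grep 10.10.10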


  
 
Per https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d#diff-ee239d1187adb09f970dc4ddcf0df1c2 
 
Assuming eth0 is your interface attached to the external network, create two files in /etc/sysconfig/network-scripts/ as follows (change the MTU if needed):

cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth0 
DEVICE=eth0 
ONBOOT=yes 
DEVICETYPE=ovs 
TYPE=OVSPort 
OVS_BRIDGE=br-eth0 
BOOTPROTO=none 
VLAN=yes 
MTU="9000" 
NM_CONTROLLED=no 
EOF 

cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-br-eth0 
DEVICE=br-eth0 
DEVICETYPE=ovs 
OVSBOOTPROTO=none 
TYPE=OVSBridge 
ONBOOT=yes 
BOOTPROTO=static 
MTU="9000" 
NM_CONTROLLED=no 
EOF

Restart the network for the changes to take effect:
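
systemctl restart network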

[root@hacontroller1 ~(keystone_admin)]# neutron net-list
+--------------------------------------+----------------------------------------------------+-------------------------------------------------------+
| id                                   | name                                               | subnets                                               |
+--------------------------------------+----------------------------------------------------+-------------------------------------------------------+
| b4580386-bc02-4aa7-8792-ea4c40c41573 | public                                             | a2c617b1-17cc-4768-b213-9f0795d07b40 10.10.10.0/24    |
| ab421dc7-27fa-4984-ae21-ba9518887293 | HA network tenant efe017b919c1487bab8c58281fcaceeb | 6886d46c-4947-455d-8656-ff0f2a649632 169.254.192.0/18 |
| 847e5c9c-ce9f-4b2c-86fb-d7597017e8e3 | private                                            | 1c47d964-d7ec-4a72-a5a7-bc390c96359d 30.0.0.0/24      |
+--------------------------------------+----------------------------------------------------+-------------------------------------------------------+ 
 
[root@hacontroller1 ~(keystone_admin)]# neutron subnet-list
+--------------------------------------+---------------------------------------------------+------------------+------------------------------------------------------+
| id                                   | name                                              | cidr             | allocation_pools                                     |
+--------------------------------------+---------------------------------------------------+------------------+------------------------------------------------------+
| a2c617b1-17cc-4768-b213-9f0795d07b40 | public_subnet                                     | 10.10.10.0/24    | {"start": "10.10.10.100", "end": "10.10.10.150"}     |
| 6886d46c-4947-455d-8656-ff0f2a649632 | HA subnet tenant efe017b919c1487bab8c58281fcaceeb | 169.254.192.0/18 | {"start": "169.254.192.1", "end": "169.254.255.254"} |
| 1c47d964-d7ec-4a72-a5a7-bc390c96359d |                                                   | 30.0.0.0/24      | {"start": "30.0.0.2", "end": "30.0.0.254"}           |
+--------------------------------------+---------------------------------------------------+------------------+------------------------------------------------------+
  
[root@hacontroller2 ~(keystone_demo)]#  neutron router-gateway-set RouterDMS public
[root@hacontroller2 ~(keystone_demo)]#  neutron router-interface-add RouterDMS \
1c47d964-d7ec-4a72-a5a7-bc390c96359d 

[root@hacontroller2 ~(keystone_demo)]# neutron router-port-list RouterDMS
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                       |
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
| dd6507fd-73e3-45f6-a935-8bbf29dacbb9 |      | fa:16:3e:26:55:06 | {"subnet_id": "1c47d964-d7ec-4a72-a5a7-bc390c96359d", "ip_address": "30.0.0.1"} |
+--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+
[root@hacontroller2 ~(keystone_demo)]# neutron port-show dd6507fd-73e3-45f6-a935-8bbf29dacbb9
+-----------------------+--------------------------------------------------------------------------------------------------+
| Field                 | Value                                                                                            |
+-----------------------+--------------------------------------------------------------------------------------------------+
| admin_state_up        | True                                                                                             |
| allowed_address_pairs |                                                                                                  |
| binding:vnic_type     | normal                                                                                           |
| device_id             | afe13460-e106-4a0a-abf5-a618f97de6b9                                                             |
| device_owner          | network:router_interface                                                                         |
| dns_assignment        | {"hostname": "host-30-0-0-1", "ip_address": "30.0.0.1", "fqdn": "host-30-0-0-1.openstacklocal."} |
| dns_name              |                                                                                                  |
| extra_dhcp_opts       |                                                                                                  |
| fixed_ips             | {"subnet_id": "1c47d964-d7ec-4a72-a5a7-bc390c96359d", "ip_address": "30.0.0.1"}                  |
| id                    | dd6507fd-73e3-45f6-a935-8bbf29dacbb9                                                             |
| mac_address           | fa:16:3e:26:55:06                                                                                |
| name                  |                                                                                                  |
| network_id            | 847e5c9c-ce9f-4b2c-86fb-d7597017e8e3                                                             |
| security_groups       |                                                                                                  |
| status                | ACTIVE                                                                                           |
| tenant_id             | efe017b919c1487bab8c58281fcaceeb                                                                 |
+-----------------------+--------------------------------------------------------------------------------------------------+

********************************************
Creating security rules for tenant demo
********************************************

[root@hacontroller2 ~(keystone_demo)]#  neutron security-group-rule-create --protocol icmp \
--direction ingress --remote-ip-prefix 0.0.0.0/0 default

[root@hacontroller2 ~(keystone_demo)]#  neutron security-group-rule-create --protocol tcp \
--port-range-min 22 --port-range-max 22   --direction ingress --remote-ip-prefix 0.0.0.0/0 default 
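
The rules may then be verified with a listing (output omitted for brevity):

[root@hacontroller2 ~(keystone_demo)]# neutron security-group-rule-list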
 
********************************************
Creating ssh keypair for tenant demo
********************************************
[root@hacontroller2 ~(keystone_demo)]#  nova keypair-add oskey1 > oskey1.priv
[root@hacontroller2 ~(keystone_demo)]#  chmod 600  oskey1.priv

[root@hacontroller2 ~(keystone_demo)]# neutron net-list
+--------------------------------------+---------+----------------------------------------------------+
| id                                   | name    | subnets                                            |
+--------------------------------------+---------+----------------------------------------------------+
| b4580386-bc02-4aa7-8792-ea4c40c41573 | public  | a2c617b1-17cc-4768-b213-9f0795d07b40 10.10.10.0/24 |
| 847e5c9c-ce9f-4b2c-86fb-d7597017e8e3 | private | 1c47d964-d7ec-4a72-a5a7-bc390c96359d 30.0.0.0/24   |
+--------------------------------------+---------+----------------------------------------------------+
[root@hacontroller2 ~(keystone_demo)]# glance image-list
+--------------------------------------+-----------+
| ID                                   | Name      |
+--------------------------------------+-----------+
| 6b4ee270-41ca-4a14-b584-d21f6ff5d6be | cirros    |
| e6945bf1-0a0d-4e99-a1fc-64ca45479095 | VF23Cloud |
+--------------------------------------+-----------+

[root@hacontroller2 ~(keystone_demo)]#  nova boot --flavor 2 --key_name oskey1 --image \
e6945bf1-0a0d-4e99-a1fc-64ca45479095 --nic net-id=847e5c9c-ce9f-4b2c-86fb-d7597017e8e3 VF23Devs05
+--------------------------------------+--------------------------------------------------+
| Property                             | Value                                            |
+--------------------------------------+--------------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                           |
| OS-EXT-AZ:availability_zone          |                                                  |
| OS-EXT-STS:power_state               | 0                                                |
| OS-EXT-STS:task_state                | scheduling                                       |
| OS-EXT-STS:vm_state                  | building                                         |
| OS-SRV-USG:launched_at               | -                                                |
| OS-SRV-USG:terminated_at             | -                                                |
| accessIPv4                           |                                                  |
| accessIPv6                           |                                                  |
| adminPass                            | 8c3HZUTS3jZ3                                     |
| config_drive                         |                                                  |
| created                              | 2015-11-28T17:44:02Z                             |
| flavor                               | m1.small (2)                                     |
| hostId                               |                                                  |
| id                                   | 68db2410-5d7d-42ca-82ab-6000123ab8d2             |
| image                                | VF23Cloud (e6945bf1-0a0d-4e99-a1fc-64ca45479095) |
| key_name                             | oskey1                                           |
| metadata                             | {}                                               |
| name                                 | VF23Devs05                                       |
| os-extended-volumes:volumes_attached | []                                               |
| progress                             | 0                                                |
| security_groups                      | default                                          |
| status                               | BUILD                                            |
| tenant_id                            | efe017b919c1487bab8c58281fcaceeb                 |
| updated                              | 2015-11-28T17:44:03Z                             |
| user_id                              | 426a9a98019f4055a2edb3d145355646                 |
+--------------------------------------+--------------------------------------------------+
[root@hacontroller2 ~(keystone_demo)]# nova list

+--------------------------------------+------------+---------+------------+-------------+--------------------------------+
| ID                                   | Name       | Status  | Task State | Power State | Networks                       |
+--------------------------------------+------------+---------+------------+-------------+--------------------------------+
| 2b0f822f-be17-43c1-b127-f626d5a62823 | CirrOSDevs | SHUTOFF | -          | Shutdown    | private=30.0.0.4, 10.10.10.101 |
| 68db2410-5d7d-42ca-82ab-6000123ab8d2 | VF23Devs05 | BUILD   | spawning   | NOSTATE     |                                |
+--------------------------------------+------------+---------+------------+-------------+--------------------------------+
[root@hacontroller2 ~(keystone_demo)]# nova list
+--------------------------------------+------------+---------+------------+-------------+--------------------------------+
| ID                                   | Name       | Status  | Task State | Power State | Networks                       |
+--------------------------------------+------------+---------+------------+-------------+--------------------------------+
| 2b0f822f-be17-43c1-b127-f626d5a62823 | CirrOSDevs | SHUTOFF | -          | Shutdown    | private=30.0.0.4, 10.10.10.101 |
| 68db2410-5d7d-42ca-82ab-6000123ab8d2 | VF23Devs05 | ACTIVE  | -          | Running     | private=30.0.0.10              |
+--------------------------------------+------------+---------+------------+-------------+--------------------------------+

[root@hacontroller2 ~(keystone_demo)]# neutron port-list --device-id \
68db2410-5d7d-42ca-82ab-6000123ab8d2
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                        |
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+
| 879c8ca8-fe8e-42d7-8b6b-34be981d03d0 |      | fa:16:3e:32:47:49 | {"subnet_id": "1c47d964-d7ec-4a72-a5a7-bc390c96359d", "ip_address": "30.0.0.10"} |
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+

[root@hacontroller2 ~(keystone_demo)]# neutron floatingip-create public
Created a new floatingip:
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| fixed_ip_address    |                                      |
| floating_ip_address | 10.10.10.102                         |
| floating_network_id | b4580386-bc02-4aa7-8792-ea4c40c41573 |
| id                  | aa48fd10-bb25-46ae-8f76-eb90e343b3f1 |
| port_id             |                                      |
| router_id           |                                      |
| status              | DOWN                                 |
| tenant_id           | efe017b919c1487bab8c58281fcaceeb     |
+---------------------+--------------------------------------+

[root@hacontroller2 ~(keystone_demo)]# neutron floatingip-associate \
aa48fd10-bb25-46ae-8f76-eb90e343b3f1 879c8ca8-fe8e-42d7-8b6b-34be981d03d0
Associated floating IP aa48fd10-bb25-46ae-8f76-eb90e343b3f1

[root@hacontroller2 ~(keystone_demo)]# nova list
+--------------------------------------+------------+---------+------------+-------------+---------------------------------+
| ID                                   | Name       | Status  | Task State | Power State | Networks                        |
+--------------------------------------+------------+---------+------------+-------------+---------------------------------+
| 2b0f822f-be17-43c1-b127-f626d5a62823 | CirrOSDevs | SHUTOFF | -          | Shutdown    | private=30.0.0.4, 10.10.10.101  |
| 68db2410-5d7d-42ca-82ab-6000123ab8d2 | VF23Devs05 | ACTIVE  | -          | Running     | private=30.0.0.10, 10.10.10.102 |
+--------------------------------------+------------+---------+------------+-------------+---------------------------------+
 
[root@hacontroller1 ~(keystone_admin)]# ip netns exec qrouter-afe13460-e106-4a0a-abf5-a618f97de6b9   ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
8: ha-013404f6-0b: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:d5:7e:6f brd ff:ff:ff:ff:ff:ff
    inet 169.254.192.2/18 brd 169.254.255.255 scope global ha-013404f6-0b
       valid_lft forever preferred_lft forever
    inet 169.254.0.1/24 scope global ha-013404f6-0b
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fed5:7e6f/64 scope link 
       valid_lft forever preferred_lft forever
9: qr-dd6507fd-73: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:26:55:06 brd ff:ff:ff:ff:ff:ff
    inet 30.0.0.1/24 scope global qr-dd6507fd-73
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe26:5506/64 scope link nodad 
       valid_lft forever preferred_lft forever
10: qg-a37e106e-70: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:0e:4b:eb brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.100/24 scope global qg-a37e106e-70
       valid_lft forever preferred_lft forever
    inet 10.10.10.101/32 scope global qg-a37e106e-70
       valid_lft forever preferred_lft forever
    inet 10.10.10.102/32 scope global qg-a37e106e-70
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe0e:4beb/64 scope link nodad 
       valid_lft forever preferred_lft forever
 
 
Instance started
 
[root@hacontroller2 ~(keystone_demo)]# nova console-log VF23Devs05
  


References
1. http://blog.oddbit.com/2015/08/13/provider-external-networks-details/
2. https://github.com/beekhof/osp-ha-deploy/blob/master/keepalived/neutron-config.md

Tuesday, November 17, 2015

Attempt to set up HAProxy/Keepalived 3 Node Controller on RDO Liberty per Javier Pena

URGENT UPDATE 11/18/2015
Please view https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d
It looks like work in progress.
See also https://www.redhat.com/archives/rdo-list/2015-November/msg00168.html
END UPDATE

Actually, the setup below closely follows https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md
To my knowledge, Cisco's schema has been implemented:-
Keepalived, HAProxy, Galera for MySQL, manual install, at least 3 controller nodes. I have just highlighted several steps which, I believe, allowed me to bring this work to success. Javier uses a flat external network provider for the controller cluster, disabling NetworkManager && enabling the network service from the very start; there is one step which I was unable to skip. It is disabling the IPs of the eth0 interfaces && restarting the network service right before running `ovs-vsctl add-port br-eth0 eth0`, per the Neutron building instructions of the mentioned "Howto", which seems to be one of the best I've ever seen.
I (just) guess that, due to this sequence of steps, the external network is still pingable even on an already built and apparently healthy three-node controller cluster:-

 
However, had I disabled the eth0 IPs from the start, I would have lost connectivity right away when switching from NetworkManager to the network service. In general, the external network is supposed to be pingable from the qrouter namespace, due to the Neutron router's DNAT/SNAT iptables forwarding, but not from the controller itself. I am also aware that when an Ethernet interface becomes an OVS port of an OVS bridge, its IP is supposed to be suppressed. When an external network provider is not used, br-ex gets an available IP on the external network. Using an external network provider changes the situation. Details may be seen here:-
https://www.linux.com/community/blogs/133-general-linux/858156-multiple-external-networks-with-a-single-l3-agent-testing-on-rdo-liberty-per-lars-kellogg-stedman

[root@hacontroller1 ~(keystone_admin)]# systemctl status NetworkManager
NetworkManager.service - Network Manager
   Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; disabled)
   Active: inactive (dead)

[root@hacontroller1 ~(keystone_admin)]# systemctl status network
network.service - LSB: Bring up/down networking
   Loaded: loaded (/etc/rc.d/init.d/network)
   Active: active (exited) since Wed 2015-11-18 08:36:53 MSK; 2h 10min ago
  Process: 708 ExecStart=/etc/rc.d/init.d/network start (code=exited, status=0/SUCCESS)


Nov 18 08:36:47 hacontroller1.example.com network[708]: Bringing up loopback interface:  [  OK  ]
Nov 18 08:36:51 hacontroller1.example.com network[708]: Bringing up interface eth0:  [  OK  ]
Nov 18 08:36:53 hacontroller1.example.com network[708]: Bringing up interface eth1:  [  OK  ]
Nov 18 08:36:53 hacontroller1.example.com systemd[1]: Started LSB: Bring up/down networking.

[root@hacontroller1 ~(keystone_admin)]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet6 fe80::5054:ff:fe6d:926a  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:6d:92:6a  txqueuelen 1000  (Ethernet)
        RX packets 5036  bytes 730778 (713.6 KiB)
        RX errors 0  dropped 12  overruns 0  frame 0
        TX packets 15715  bytes 930045 (908.2 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.169.142.221  netmask 255.255.255.0  broadcast 192.169.142.255
        inet6 fe80::5054:ff:fe5e:9644  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:5e:96:44  txqueuelen 1000  (Ethernet)
        RX packets 1828396  bytes 283908183 (270.7 MiB)
        RX errors 0  dropped 13  overruns 0  frame 0
        TX packets 1839312  bytes 282429736 (269.3 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 869067  bytes 69567890 (66.3 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 869067  bytes 69567890 (66.3 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@hacontroller1 ~(keystone_admin)]# ping -c 3  10.10.10.1
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=2.04 ms
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=0.103 ms
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=0.118 ms

--- 10.10.10.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.103/0.754/2.043/0.911 ms



 
Both the mgmt and external networks are emulated by corresponding libvirt networks
on the F23 virtualization server. Four VMs in total have been set up: three for the controller
nodes and one for compute (4 VCPUs, 4 GB RAM)

[root@fedora23wks ~]# cat openstackvms.xml ( for the eth1 interfaces )
<network>
   <name>openstackvms</name>
   <uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr1' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='192.169.142.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='192.169.142.2' end='192.169.142.254' />
     </dhcp>
   </ip>
 </network>
[root@fedora23wks ~]# cat public.xml ( for the external network provider )
<network>
   <name>public</name>
   <uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr2' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='10.10.10.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='10.10.10.2' end='10.10.10.254' />
     </dhcp>
   </ip>
 </network>
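
Both XML definitions are loaded and started on the virtualization host with the standard virsh network commands. A minimal sketch for openstackvms.xml (public.xml is handled the same way):

[root@fedora23wks ~]# virsh net-define openstackvms.xml
[root@fedora23wks ~]# virsh net-start openstackvms
[root@fedora23wks ~]# virsh net-autostart openstackvms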

Only one file is a bit different on the controller nodes, and it is l3_agent.ini:

[root@hacontroller1 neutron(keystone_demo)]# cat l3_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
send_arp_for_ha = 3
metadata_ip = controller-vip.example.com
external_network_bridge =
gateway_external_network_id =
[AGENT]

When "external_network_bridge = " , Neutron places the external interface of the router into the OVS bridge specified by the "provider_network" provider attribute in the Neutron network. Traffic is processed by Open vSwitch flow rules. In this configuration it is possible to utilize flat and VLAN provider networks.

*************************************************************************************
Per the "UPDATE" posted at the top of this blog entry, in the meantime a
clean solution has been provided by https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d
Per the mentioned patch, assuming eth0 is your interface attached to the external network, create two files in /etc/sysconfig/network-scripts/ as follows (change the MTU if needed):

    cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    ONBOOT=yes
    DEVICETYPE=ovs
    TYPE=OVSPort
    OVS_BRIDGE=br-eth0
    BOOTPROTO=none
    VLAN=yes
    MTU="9000"
    NM_CONTROLLED=no
    EOF

    cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-br-eth0
    DEVICE=br-eth0
    DEVICETYPE=ovs
    OVSBOOTPROTO=none
    TYPE=OVSBridge
    ONBOOT=yes
    BOOTPROTO=static
    MTU="9000"
    NM_CONTROLLED=no
    EOF

Restart the network for the changes to take effect.

systemctl restart network

The commit was made on 11/14/2015, right after a discussion on the RDO mailing list.

Details may be seen here: Nova and Neutron workflow && CLI for HAProxy/Keepalived 3 Node Controller RDO Liberty

 *************************************************************************************
One more step which I did (not sure it really has
to be done at this point in time): the IPs on the eth0 interfaces were disabled just before
running `ovs-vsctl add-port br-eth0 eth0`:-

1. Updated ifcfg-eth0 files on all Controllers
2. `service network restart` on all Controllers
3.  `ovs-vsctl add-port br-eth0 eth0`on all Controllers

*****************************************************************************************
Targeting just a POC (to get floating IPs accessible from the Fedora 23 virtualization
host) resulted in the following controller cluster setup:-
*****************************************************************************************
I installed only

Keystone

**************************
UPDATE to official docs
**************************
[root@hacontroller1 ~(keystone_admin)]# cat   keystonerc_admin
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PROJECT_NAME=admin
export OS_REGION_NAME=regionOne
export OS_PASSWORD=keystonetest
export OS_AUTH_URL=http://controller-vip.example.com:35357/v2.0/
export OS_SERVICE_ENDPOINT=http://controller-vip.example.com:35357/v2.0
export OS_SERVICE_TOKEN=$(cat /root/keystone_service_token)

export PS1='[\u@\h \W(keystone_admin)]\$ '

Glance
Neutron
Nova
Horizon

Due to Galera synchronous multi-master replication running between the controllers, commands like:-

# su keystone -s /bin/sh -c "keystone-manage db_sync"
# su glance -s /bin/sh -c "glance-manage db_sync"
# neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
# su nova -s /bin/sh -c "nova-manage db sync"

are supposed to be run just once, from controller node 1 (for instance).
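
A quick way to confirm that all three controllers see the same synced database is to query the Galera status (a hedged example, assuming root access to MariaDB on the node):

[root@hacontroller1 ~]# mysql -e "SHOW STATUS LIKE 'wsrep_cluster_size'"

The Value column is expected to report 3 once all members have joined.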


************************
Compute Node setup:-
*************************

Compute setup

**********************
On all nodes
**********************
[root@hacontroller1 neutron(keystone_demo)]# cat /etc/hosts
192.169.142.220 controller-vip.example.com controller-vip
192.169.142.221 hacontroller1.example.com hacontroller1
192.169.142.222 hacontroller2.example.com hacontroller2
192.169.142.223 hacontroller3.example.com hacontroller3
192.169.142.224 compute.example.com compute
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

[root@hacontroller1 ~(keystone_admin)]# cat /etc/neutron/neutron.conf | grep -v ^$| grep -v ^#
[DEFAULT]
bind_host = 192.169.142.22(X)
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins = router,lbaas
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
dhcp_agents_per_network = 2
api_workers = 2
rpc_workers = 2
l3_ha = True
min_l3_agents_per_router = 2
max_l3_agents_per_router = 2

[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://controller-vip.example.com:5000/
identity_uri = http://127.0.0.1:5000
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
auth_plugin = password
auth_url = http://controller-vip.example.com:35357/
username = neutron
password = neutrontest
project_name = services
[database]
connection = mysql://neutron:neutrontest@controller-vip.example.com:3306/neutron
max_retries = -1
[nova]
nova_region_name = regionOne
project_domain_id = default
project_name = services
user_domain_id = default
password = novatest
username = compute
auth_url = http://controller-vip.example.com:35357/
auth_plugin = password
[oslo_concurrency]
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts = hacontroller1,hacontroller2,hacontroller3
rabbit_ha_queues = true
[qos]


[root@hacontroller1 haproxy(keystone_demo)]# cat haproxy.cfg
global
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode tcp
    maxconn 10000
    timeout connect 5s
    timeout client 30s
    timeout server 30s

listen monitor
    bind 192.169.142.220:9300
    mode http
    monitor-uri /status
    stats enable
    stats uri /admin
    stats realm Haproxy\ Statistics
    stats auth root:redhat
    stats refresh 5s

frontend vip-db
    bind 192.169.142.220:3306
    timeout client 90m
    default_backend db-vms-galera
backend db-vms-galera
    option httpchk
    stick-table type ip size 1000
    stick on dst
    timeout server 90m
    server rhos8-node1 192.169.142.221:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions
    server rhos8-node2 192.169.142.222:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions
    server rhos8-node3 192.169.142.223:3306 check inter 1s port 9200 backup on-marked-down shutdown-sessions

# Note the RabbitMQ entry is only needed for CloudForms compatibility
# and should be removed in the future
frontend vip-rabbitmq
    option clitcpka
    bind 192.169.142.220:5672
    timeout client 900m
    default_backend rabbitmq-vms
backend rabbitmq-vms
    option srvtcpka
    balance roundrobin
    timeout server 900m
    server rhos8-node1 192.169.142.221:5672 check inter 1s
    server rhos8-node2 192.169.142.222:5672 check inter 1s
    server rhos8-node3 192.169.142.223:5672 check inter 1s

frontend vip-keystone-admin
    bind 192.169.142.220:35357
    default_backend keystone-admin-vms
    timeout client 600s
backend keystone-admin-vms
    balance roundrobin
    timeout server 600s
    server rhos8-node1 192.169.142.221:35357 check inter 1s on-marked-down shutdown-sessions
    server rhos8-node2 192.169.142.222:35357 check inter 1s on-marked-down shutdown-sessions
    server rhos8-node3 192.169.142.223:35357 check inter 1s on-marked-down shutdown-sessions

frontend vip-keystone-public
    bind 192.169.142.220:5000
    default_backend keystone-public-vms
    timeout client 600s
backend keystone-public-vms
    balance roundrobin
    timeout server 600s
    server rhos8-node1 192.169.142.221:5000 check inter 1s on-marked-down shutdown-sessions
    server rhos8-node2 192.169.142.222:5000 check inter 1s on-marked-down shutdown-sessions
    server rhos8-node3 192.169.142.223:5000 check inter 1s on-marked-down shutdown-sessions

frontend vip-glance-api
    bind 192.169.142.220:9191
    default_backend glance-api-vms
backend glance-api-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:9191 check inter 1s
    server rhos8-node2 192.169.142.222:9191 check inter 1s
    server rhos8-node3 192.169.142.223:9191 check inter 1s

frontend vip-glance-registry
    bind 192.169.142.220:9292
    default_backend glance-registry-vms
backend glance-registry-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:9292 check inter 1s
    server rhos8-node2 192.169.142.222:9292 check inter 1s
    server rhos8-node3 192.169.142.223:9292 check inter 1s

frontend vip-cinder
    bind 192.169.142.220:8776
    default_backend cinder-vms
backend cinder-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8776 check inter 1s
    server rhos8-node2 192.169.142.222:8776 check inter 1s
    server rhos8-node3 192.169.142.223:8776 check inter 1s

frontend vip-swift
    bind 192.169.142.220:8080
    default_backend swift-vms
backend swift-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8080 check inter 1s
    server rhos8-node2 192.169.142.222:8080 check inter 1s
    server rhos8-node3 192.169.142.223:8080 check inter 1s

frontend vip-neutron
    bind 192.169.142.220:9696
    default_backend neutron-vms
backend neutron-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:9696 check inter 1s
    server rhos8-node2 192.169.142.222:9696 check inter 1s
    server rhos8-node3 192.169.142.223:9696 check inter 1s

frontend vip-nova-vnc-novncproxy
    bind 192.169.142.220:6080
    default_backend nova-vnc-novncproxy-vms
backend nova-vnc-novncproxy-vms
    balance roundrobin
    timeout tunnel 1h
    server rhos8-node1 192.169.142.221:6080 check inter 1s
    server rhos8-node2 192.169.142.222:6080 check inter 1s
    server rhos8-node3 192.169.142.223:6080 check inter 1s

frontend nova-metadata-vms
    bind 192.169.142.220:8775
    default_backend nova-metadata-vms
backend nova-metadata-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8775 check inter 1s
    server rhos8-node2 192.169.142.222:8775 check inter 1s
    server rhos8-node3 192.169.142.223:8775 check inter 1s

frontend vip-nova-api
    bind 192.169.142.220:8774
    default_backend nova-api-vms
backend nova-api-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8774 check inter 1s
    server rhos8-node2 192.169.142.222:8774 check inter 1s
    server rhos8-node3 192.169.142.223:8774 check inter 1s

frontend vip-horizon
    bind 192.169.142.220:80
    timeout client 180s
    default_backend horizon-vms
backend horizon-vms
    balance roundrobin
    timeout server 180s
    mode http
    cookie SERVERID insert indirect nocache
    server rhos8-node1 192.169.142.221:80 check inter 1s cookie rhos8-horizon1 on-marked-down shutdown-sessions
    server rhos8-node2 192.169.142.222:80 check inter 1s cookie rhos8-horizon2 on-marked-down shutdown-sessions
    server rhos8-node3 192.169.142.223:80 check inter 1s cookie rhos8-horizon3 on-marked-down shutdown-sessions

frontend vip-heat-cfn
    bind 192.169.142.220:8000
    default_backend heat-cfn-vms
backend heat-cfn-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8000 check inter 1s
    server rhos8-node2 192.169.142.222:8000 check inter 1s
    server rhos8-node3 192.169.142.223:8000 check inter 1s

frontend vip-heat-cloudw
    bind 192.169.142.220:8003
    default_backend heat-cloudw-vms
backend heat-cloudw-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8003 check inter 1s
    server rhos8-node2 192.169.142.222:8003 check inter 1s
    server rhos8-node3 192.169.142.223:8003 check inter 1s

frontend vip-heat-srv
    bind 192.169.142.220:8004
    default_backend heat-srv-vms
backend heat-srv-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8004 check inter 1s
    server rhos8-node2 192.169.142.222:8004 check inter 1s
    server rhos8-node3 192.169.142.223:8004 check inter 1s

frontend vip-ceilometer
    bind 192.169.142.220:8777
    timeout client 90s
    default_backend ceilometer-vms
backend ceilometer-vms
    balance roundrobin
    timeout server 90s
    server rhos8-node1 192.169.142.221:8777 check inter 1s
    server rhos8-node2 192.169.142.222:8777 check inter 1s
    server rhos8-node3 192.169.142.223:8777 check inter 1s

frontend vip-sahara
    bind 192.169.142.220:8386
    default_backend sahara-vms
backend sahara-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8386 check inter 1s
    server rhos8-node2 192.169.142.222:8386 check inter 1s
    server rhos8-node3 192.169.142.223:8386 check inter 1s

frontend vip-trove
    bind 192.169.142.220:8779
    default_backend trove-vms
backend trove-vms
    balance roundrobin
    server rhos8-node1 192.169.142.221:8779 check inter 1s
    server rhos8-node2 192.169.142.222:8779 check inter 1s
    server rhos8-node3 192.169.142.223:8779 check inter 1s
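
Given the `listen monitor` section above, overall HAProxy health can be probed through the VIP; the stats page (auth root:redhat, as configured) is served at /admin on the same port:

[root@fedora23wks ~]# curl http://192.169.142.220:9300/status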

[root@hacontroller1 ~(keystone_demo)]# cat /etc/my.cnf.d/galera.cnf
[mysqld]
skip-name-resolve=1
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=8192
query_cache_size=0
query_cache_type=0
bind_address=192.169.142.22(X)
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://192.169.142.221,192.169.142.222,192.169.142.223"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
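
Bootstrapping this cluster for the first time follows the usual Galera pattern. A minimal sketch, assuming the galera_new_cluster helper shipped with systemd MariaDB-Galera packaging (see the galera-bootstrap link below for the authoritative procedure):

[root@hacontroller1 ~]# galera_new_cluster        # first node only
[root@hacontroller2 ~]# systemctl start mariadb   # remaining nodes join via the gcomm:// list
[root@hacontroller3 ~]# systemctl start mariadb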

[root@hacontroller1 ~(keystone_demo)]# cat /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy {
    script "/usr/bin/killall -0 haproxy"
    interval 2
}

vrrp_instance VI_PUBLIC {
    interface eth1
    state BACKUP
    virtual_router_id 52
    priority 101
    virtual_ipaddress {
        192.169.142.220 dev eth1
    }
    track_script {
        chk_haproxy
    }
    # Avoid failback
    nopreempt
}

vrrp_sync_group VG1
    group {
        VI_PUBLIC
    }
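
With this configuration, whichever node currently holds VI_PUBLIC carries the 192.169.142.220 VIP on eth1, which is easy to verify on each node:

[root@hacontroller1 ~(keystone_admin)]# ip addr show eth1 | grep 192.169.142.220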

*************************************************************************
The most difficult procedure is re-syncing the Galera MariaDB cluster
*************************************************************************
https://github.com/beekhof/osp-ha-deploy/blob/master/keepalived/galera-bootstrap.md

Because the Nova services start without waiting for the Galera databases to get in sync, once the sync is done, and regardless of systemctl reporting that the services are up and running, a database update via `openstack-service restart nova` is required on every controller. Also, the most likely reason for VMs failing to reach the Nova metadata server at boot is a failure of the neutron-l3-agent service on a controller, due to the classical design: VMs access metadata via neutron-ns-metadata-proxy, which runs in the qrouter namespace. The neutron-l3-agents may be started with no problems, and sometimes just need to be restarted.
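
A sketch of the corresponding recovery commands, run on each controller (openstack-service comes from the openstack-utils package):

# openstack-service restart nova
# systemctl restart neutron-l3-agent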



*****************************************
Creating Neutron Router via CLI.
*****************************************

[root@hacontroller1 ~(keystone_admin)]# cat  keystonerc_admin
export OS_USERNAME=admin
export OS_TENANT_NAME=admin
export OS_PROJECT_NAME=admin
export OS_REGION_NAME=regionOne
export OS_PASSWORD=keystonetest
export OS_AUTH_URL=http://controller-vip.example.com:35357/v2.0/
export OS_SERVICE_ENDPOINT=http://controller-vip.example.com:35357/v2.0
export OS_SERVICE_TOKEN=$(cat /root/keystone_service_token)

export PS1='[\u@\h \W(keystone_admin)]\$ '


[root@hacontroller1 ~(keystone_admin)]# keystone tenant-list

+----------------------------------+----------+---------+
|                id                |   name   | enabled |
+----------------------------------+----------+---------+
| acdc927b53bd43ae9a7ed657d1309884 |  admin   |   True  |
| 7db0aa013d60434996585c4ee359f512 |   demo   |   True  |
| 9d8bf126d54e4d11a109bd009f54a87f | services |   True  |
+----------------------------------+----------+---------+

[root@hacontroller1 ~(keystone_admin)]# neutron router-create --ha True --tenant-id 7db0aa013d60434996585c4ee359f512  RouterDS
Created a new router:
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| admin_state_up        | True                                 |
| distributed           | False                                |
| external_gateway_info |                                      |
| ha                    | True                                 |
| id                    | fdf540d2-c128-4677-b403-d71c796d7e18 |
| name                  | RouterDS                             |
| routes                |                                      |
| status                | ACTIVE                               |
| tenant_id             | 7db0aa013d60434996585c4ee359f512     |
+-----------------------+--------------------------------------+


    

Run-time snapshots: Keepalived status on the controller nodes.

HA Neutron router belonging to tenant demo, created via the Neutron CLI.
 

***********************************************************************
 At this point hacontroller1 goes down. On hacontroller2 run :-
***********************************************************************
[root@hacontroller2 ~(keystone_admin)]# neutron l3-agent-list-hosting-router RouterHA
+--------------------------------------+---------------------------+----------------+-------+----------+
| id                                   | host                      | admin_state_up | alive | ha_state |
+--------------------------------------+---------------------------+----------------+-------+----------+
| a03409d2-fbe9-492c-a954-e1bdf7627491 | hacontroller2.example.com | True           | :-)   | active   |
| 0d6e658a-e796-4cff-962f-06e455fce02f | hacontroller1.example.com | True           | xxx   | active   |
+--------------------------------------+---------------------------+----------------+-------+----------+

  
***********************************************************************
 At this point hacontroller2 goes down. hacontroller1 goes up :-
***********************************************************************


Nova services status on all controllers.

Neutron services status on all controllers.

Compute node status.
  

  

 ******************************************************************************
 Cloud VM (L3) at runtime: accessibility from the F23 virtualization host
 running the HA 3-node controller and compute node VMs (L2)
 ******************************************************************************
[root@fedora23wks ~]# ping  10.10.10.103
PING 10.10.10.103 (10.10.10.103) 56(84) bytes of data.
64 bytes from 10.10.10.103: icmp_seq=1 ttl=63 time=1.14 ms
64 bytes from 10.10.10.103: icmp_seq=2 ttl=63 time=0.813 ms
64 bytes from 10.10.10.103: icmp_seq=3 ttl=63 time=0.636 ms
64 bytes from 10.10.10.103: icmp_seq=4 ttl=63 time=0.778 ms
64 bytes from 10.10.10.103: icmp_seq=5 ttl=63 time=0.493 ms
^C
--- 10.10.10.103 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4001ms
rtt min/avg/max/mdev = 0.493/0.773/1.146/0.218 ms

[root@fedora23wks ~]# ssh -i oskey1.priv fedora@10.10.10.103
Last login: Tue Nov 17 09:02:30 2015
[fedora@vf23dev ~]$ uname -a
Linux vf23dev.novalocal 4.2.5-300.fc23.x86_64 #1 SMP Tue Oct 27 04:29:56 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux

  
   
  

 ********************************************************************************
 Verifying the Neutron workflow on the 3-node controller built via the patch:
 ********************************************************************************
[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl show br-eth0
OFPT_FEATURES_REPLY (xid=0x2): dpid:0000baf0db1a854f
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
 1(eth0): addr:52:54:00:aa:0e:fc
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 2(phy-br-eth0): addr:46:c0:e0:30:72:92
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 LOCAL(br-eth0): addr:ba:f0:db:1a:85:4f
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0
[root@hacontroller1 ~(keystone_admin)]# ovs-ofctl dump-flows  br-eth0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=15577.057s, table=0, n_packets=50441, n_bytes=3262529, idle_age=2, priority=4,in_port=2,dl_vlan=3 actions=strip_vlan,NORMAL
 cookie=0x0, duration=15765.938s, table=0, n_packets=31225, n_bytes=1751795, idle_age=0, priority=2,in_port=2 actions=drop
 cookie=0x0, duration=15765.974s, table=0, n_packets=39982, n_bytes=42838752, idle_age=1, priority=0 actions=NORMAL

Check `ovs-vsctl show`

 Bridge br-int
        fail_mode: secure
        Port "tapc8488877-45"
            tag: 4
            Interface "tapc8488877-45"
                type: internal
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "tap14aa6eeb-70"
            tag: 2
            Interface "tap14aa6eeb-70"
                type: internal
        Port "qr-8f5b3f4a-45"
            tag: 2
            Interface "qr-8f5b3f4a-45"
                type: internal
        Port "int-br-eth0"
            Interface "int-br-eth0"
                type: patch
                options: {peer="phy-br-eth0"}
        Port "qg-34893aa0-17"
            tag: 3



[root@hacontroller2 ~(keystone_demo)]# ovs-ofctl show  br-eth0
OFPT_FEATURES_REPLY (xid=0x2): dpid:0000b6bfa2bafd45
n_tables:254, n_buffers:256
capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
 1(eth0): addr:52:54:00:73:df:29
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 2(phy-br-eth0): addr:be:89:61:87:56:20
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
 LOCAL(br-eth0): addr:b6:bf:a2:ba:fd:45
     config:     0
     state:      0
     speed: 0 Mbps now, 0 Mbps max
OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0

[root@hacontroller2 ~(keystone_demo)]# ovs-ofctl dump-flows  br-eth0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=15810.746s, table=0, n_packets=0, n_bytes=0, idle_age=15810, priority=4,in_port=2,dl_vlan=2 actions=strip_vlan,NORMAL
 cookie=0x0, duration=16105.662s, table=0, n_packets=31849, n_bytes=1786827, idle_age=0, priority=2,in_port=2 actions=drop
 cookie=0x0, duration=16105.696s, table=0, n_packets=39762, n_bytes=2100763, idle_age=0, priority=0 actions=NORMAL

Check `ovs-vsctl show`

   Bridge br-int
        fail_mode: secure
        Port "qg-34893aa0-17"
            tag: 2
            Interface "qg-34893aa0-17"
                type: internal


The qrouter namespace's outgoing interface qg-xxxxxx sends VLAN-tagged packets to eth0 (which has VLAN=yes, see the link below), but the OVS bridge br-eth0 is not aware of the VLAN tagging: a flow rule strips the tags before the packets are sent out to the external flat network. With flat external network providers the qg-xxxxxx interfaces sit on br-int, and that is normal. I believe this is the core reason why the patch https://github.com/beekhof/osp-ha-deploy/commit/b2e01e86ca93cfad9ad01d533b386b4c9607c60d
works so stably. The issue doesn't show up on a single controller, but in my
experience it is critical for a HAProxy/Keepalived 3-node controller cluster.

Per Lars Kellogg-Stedman [ 1 ]:
  1. The packet exits the qg-... interface of the router (where it is assigned the VLAN tag associated with the external network).
  2. The packet is delivered to the external bridge, where a flow rule strips the VLAN tag.
  3. The packet is sent out the physical interface associated with the bridge.
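Both steps can be confirmed from the shell; a minimal sketch, assuming the port name and VLAN id seen in the outputs above (they differ per deployment):

# ovs-vsctl get Port qg-34893aa0-17 tag                <== internal VLAN tag on the router's gateway port
# ovs-ofctl dump-flows br-eth0 | grep strip_vlan       <== egress rule on the external bridge that strips it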

References
1.  http://blog.oddbit.com/2015/08/13/provider-external-networks-details/
2.  https://ask.openstack.org/en/question/85055/how-does-external-network-provider-work-flatvlangre/
 

Saturday, November 7, 2015

DVR set up on RDO Liberty with separated Controller && Network Nodes

  Actually, the setup below was carefully tested with an eye on Mitaka Milestone 1, which will hopefully allow verifying the solution provided for Bug #1365473, "Unable to create a router that's both HA and distributed".
Delorean repos are now supposed to be rebuilt and ready for testing via RDO deployment within a week after each Mitaka milestone [ 1 ].

  The DVR provides direct (and reverse) access to the external network on the compute nodes. For instances with a floating IP address, routing from project to external network is performed on the compute nodes, so DVR eliminates the single point of failure and the network congestion on the Network Node. agent_mode is set to "dvr" in l3_agent.ini on the Compute Nodes. Instances with only a fixed IP address still rely on the single network node for outbound connectivity via SNAT; agent_mode is set to "dvr_snat" in l3_agent.ini on the Network Node. To support DVR, each compute node runs neutron-l3-agent, neutron-metadata-agent and neutron-openvswitch-agent. DVR also requires L2 population to be activated and ARP proxies running on the Neutron L2 layer.
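With router_distributed = True set in neutron.conf (see below), tenant routers are created distributed by default; the property can also be requested explicitly. A sketch (the router name is illustrative):

# neutron router-create --distributed True RouterDVR
# neutron router-show RouterDVR | grep distributed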

Setup

192.169.142.127 - Controller
192.169.142.147 - Network Node
192.169.142.137 - Compute Node
192.169.142.157 - Compute Node

*********************************************************************************
1. First Libvirt subnet "openstackvms" serves as the management network.
All four VMs (all nodes) are attached to this subnet.
**********************************************************************************
2. Second Libvirt subnet "public" simulates the external network. The Network
Node && Compute Nodes are attached to "public"; later on, the "eth2" interface
(which belongs to "public") is converted into an OVS port of the br-ex OVS
bridge on the Network Node and Compute Nodes.
***********************************************************************************
3. Third Libvirt subnet "vteps" serves for VTEP endpoint simulation. The Network
and Compute Node VMs are attached to this subnet.
***********************************************************************************

# cat openstackvms.xml

<network>
   <name>openstackvms</name>
   <uuid>d0e9964a-f91a-40c0-b769-a609aee41bf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr1' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='192.169.142.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='192.169.142.2' end='192.169.142.254' />
     </dhcp>
   </ip>
 </network>

# cat public.xml
<network>
   <name>public</name>
   <uuid>d1e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr2' stp='on' delay='0' />
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='172.24.4.225' netmask='255.255.255.240'>
     <dhcp>
       <range start='172.24.4.226' end='172.24.4.238' />
     </dhcp>
  </ip>
 </network>


# cat vteps.xml
<network>
   <name>vteps</name>
   <uuid>d0e9965b-f92c-40c1-b749-b609aed42cf2</uuid>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
     </nat>
   </forward>
   <bridge name='virbr3' stp='on' delay='0' />  <!-- must differ from "public", which already uses virbr2 -->
   <mac address='52:54:00:60:f8:6d'/>
   <ip address='10.0.0.1' netmask='255.255.255.0'>
     <dhcp>
       <range start='10.0.0.2' end='10.0.0.254' />  <!-- 10.0.0.1 is the bridge address itself -->
     </dhcp>
   </ip>
 </network>
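A sketch for bringing the three networks up on the hypervisor (file names as above; standard virsh network commands):

# virsh net-define openstackvms.xml && virsh net-start openstackvms && virsh net-autostart openstackvms
# virsh net-define public.xml && virsh net-start public && virsh net-autostart public
# virsh net-define vteps.xml && virsh net-start vteps && virsh net-autostart vteps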

Four CentOS 7.1 VMs (4 GB RAM, 4 VCPU) have been built for testing
on a Fedora 23 KVM hypervisor.

Controller node   - one VNIC    (eth0 mgmt)
Network node      - three VNICs (eth0 mgmt, eth1 vteps, eth2 public)
2 x Compute node  - three VNICs (eth0 mgmt, eth1 vteps, eth2 public)

*************************************************
Installation answer-file : answer4Node.txt
*************************************************
[general]
CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
CONFIG_DEFAULT_PASSWORD=
CONFIG_MARIADB_INSTALL=y
CONFIG_GLANCE_INSTALL=y
CONFIG_CINDER_INSTALL=y
CONFIG_NOVA_INSTALL=y
CONFIG_NEUTRON_INSTALL=y
CONFIG_HORIZON_INSTALL=y
CONFIG_SWIFT_INSTALL=y
CONFIG_CEILOMETER_INSTALL=y
CONFIG_HEAT_INSTALL=n
CONFIG_CLIENT_INSTALL=y
CONFIG_NTP_SERVERS=
CONFIG_NAGIOS_INSTALL=y
EXCLUDE_SERVERS=
CONFIG_DEBUG_MODE=n
CONFIG_CONTROLLER_HOST=192.169.142.127
CONFIG_COMPUTE_HOSTS=192.169.142.137,192.169.142.157
CONFIG_NETWORK_HOSTS=192.169.142.147
CONFIG_VMWARE_BACKEND=n
CONFIG_UNSUPPORTED=n
CONFIG_VCENTER_HOST=
CONFIG_VCENTER_USER=
CONFIG_VCENTER_PASSWORD=
CONFIG_VCENTER_CLUSTER_NAME=
CONFIG_STORAGE_HOST=192.169.142.127
CONFIG_USE_EPEL=y
CONFIG_REPO=
CONFIG_RH_USER=
CONFIG_SATELLITE_URL=
CONFIG_RH_PW=
CONFIG_RH_OPTIONAL=y
CONFIG_RH_PROXY=
CONFIG_RH_PROXY_PORT=
CONFIG_RH_PROXY_USER=
CONFIG_RH_PROXY_PW=
CONFIG_SATELLITE_USER=
CONFIG_SATELLITE_PW=
CONFIG_SATELLITE_AKEY=
CONFIG_SATELLITE_CACERT=
CONFIG_SATELLITE_PROFILE=
CONFIG_SATELLITE_FLAGS=
CONFIG_SATELLITE_PROXY=
CONFIG_SATELLITE_PROXY_USER=
CONFIG_SATELLITE_PROXY_PW=
CONFIG_AMQP_BACKEND=rabbitmq
CONFIG_AMQP_HOST=192.169.142.127
CONFIG_AMQP_ENABLE_SSL=n
CONFIG_AMQP_ENABLE_AUTH=n
CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
CONFIG_AMQP_SSL_PORT=5671
CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
CONFIG_AMQP_SSL_SELF_SIGNED=y
CONFIG_AMQP_AUTH_USER=amqp_user
CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
CONFIG_MARIADB_HOST=192.169.142.127
CONFIG_MARIADB_USER=root
CONFIG_MARIADB_PW=7207ae344ed04957
CONFIG_KEYSTONE_DB_PW=abcae16b785245c3
CONFIG_KEYSTONE_REGION=RegionOne
CONFIG_KEYSTONE_ADMIN_TOKEN=3ad2de159f9649afb0c342ba57e637d9
CONFIG_KEYSTONE_ADMIN_PW=7049f834927e4468
CONFIG_KEYSTONE_DEMO_PW=bf737b785cfa4398
CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
CONFIG_KEYSTONE_SERVICE_NAME=httpd
CONFIG_GLANCE_DB_PW=41264fc52ffd4fe8
CONFIG_GLANCE_KS_PW=f6a9398960534797
CONFIG_GLANCE_BACKEND=file
CONFIG_CINDER_DB_PW=5ac08c6d09ba4b69
CONFIG_CINDER_KS_PW=c8cb1ecb8c2b4f6f
CONFIG_CINDER_BACKEND=lvm
CONFIG_CINDER_VOLUMES_CREATE=y
CONFIG_CINDER_VOLUMES_SIZE=5G
CONFIG_CINDER_GLUSTER_MOUNTS=
CONFIG_CINDER_NFS_MOUNTS=
CONFIG_CINDER_NETAPP_LOGIN=
CONFIG_CINDER_NETAPP_PASSWORD=
CONFIG_CINDER_NETAPP_HOSTNAME=
CONFIG_CINDER_NETAPP_SERVER_PORT=80
CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=
CONFIG_CINDER_NETAPP_VOLUME_LIST=
CONFIG_CINDER_NETAPP_VFILER=
CONFIG_CINDER_NETAPP_VSERVER=
CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
CONFIG_CINDER_NETAPP_SA_PASSWORD=
CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
CONFIG_CINDER_NETAPP_STORAGE_POOLS=
CONFIG_NOVA_DB_PW=1e1b5aeeeaf342a8
CONFIG_NOVA_KS_PW=d9583177a2444f06
CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
CONFIG_NOVA_COMPUTE_PRIVIF=eth1
CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
CONFIG_NOVA_NETWORK_PUBIF=eth0
CONFIG_NOVA_NETWORK_PRIVIF=eth1

CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL=nova
CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
CONFIG_NOVA_NETWORK_VLAN_START=100
CONFIG_NOVA_NETWORK_NUMBER=1
CONFIG_NOVA_NETWORK_SIZE=255
CONFIG_NEUTRON_KS_PW=808e36e154bd4cee
CONFIG_NEUTRON_DB_PW=0e2b927a21b44737
CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
CONFIG_NEUTRON_L2_PLUGIN=ml2
CONFIG_NEUTRON_METADATA_PW=a965cd23ed2f4502
CONFIG_LBAAS_INSTALL=n
CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
CONFIG_NEUTRON_FWAAS=n
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
CONFIG_NEUTRON_ML2_VLAN_RANGES=
CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=1001:2000
CONFIG_NEUTRON_ML2_VXLAN_GROUP=239.1.1.2
CONFIG_NEUTRON_ML2_VNI_RANGES=1001:2000
CONFIG_NEUTRON_L2_AGENT=openvswitch
CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE=local
CONFIG_NEUTRON_LB_VLAN_RANGES=
CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
CONFIG_NEUTRON_OVS_VLAN_RANGES=
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
CONFIG_NEUTRON_OVS_TUNNEL_RANGES=1001:2000
CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789

CONFIG_HORIZON_SSL=n
CONFIG_SSL_CERT=
CONFIG_SSL_KEY=
CONFIG_SSL_CACHAIN=
CONFIG_SWIFT_KS_PW=8f75bfd461234c30
CONFIG_SWIFT_STORAGES=
CONFIG_SWIFT_STORAGE_ZONES=1
CONFIG_SWIFT_STORAGE_REPLICAS=1
CONFIG_SWIFT_STORAGE_FSTYPE=ext4
CONFIG_SWIFT_HASH=a60aacbedde7429a
CONFIG_SWIFT_STORAGE_SIZE=2G
CONFIG_PROVISION_DEMO=y
CONFIG_PROVISION_TEMPEST=n
CONFIG_PROVISION_TEMPEST_USER=
CONFIG_PROVISION_TEMPEST_USER_PW=44faa4ebc3da4459
CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
CONFIG_HEAT_AUTH_ENC_KEY=fc3fb7fee61e46b0
CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
CONFIG_HEAT_CLOUDWATCH_INSTALL=n
CONFIG_HEAT_USING_TRUSTS=y
CONFIG_HEAT_CFN_INSTALL=n
CONFIG_HEAT_DOMAIN=heat
CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
CONFIG_CEILOMETER_SECRET=19ae0e7430174349
CONFIG_CEILOMETER_KS_PW=337b08d4b3a44753
CONFIG_MONGODB_HOST=192.169.142.127
CONFIG_NAGIOS_PW=02f168ee8edd44e4

**************************************
At this point, run on the Controller:
**************************************

# yum -y  install centos-release-openstack-liberty
# yum -y  install openstack-packstack
# packstack --answer-file=./answer4Node.txt


***************************************************************************
After the packstack install, perform the following on the Network && Compute Nodes
***************************************************************************
[root@ip-192-169-142-147 network-scripts]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.24.4.230"
NETMASK="255.255.255.240"
DNS1="83.221.202.254"
BROADCAST="172.24.4.239"
GATEWAY="172.24.4.225"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no
[root@ip-192-169-142-147 network-scripts]# cat ifcfg-eth2
DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

*********************************
Switch to network service
*********************************
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# reboot

[root@ip-192-169-142-137 network-scripts]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.24.4.229"
NETMASK="255.255.255.240"
DNS1="83.221.202.254"
BROADCAST="172.24.4.239"
GATEWAY="172.24.4.225"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ip-192-169-142-137 network-scripts]# cat ifcfg-eth2
DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

*********************************
Switch to network service
*********************************
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# reboot

[root@ip-192-169-142-157 network-scripts]# cat ifcfg-br-ex
DEVICE="br-ex"
BOOTPROTO="static"
IPADDR="172.24.4.238"
NETMASK="255.255.255.240"
DNS1="83.221.202.254"
BROADCAST="172.24.4.239"
GATEWAY="172.24.4.225"
NM_CONTROLLED="no"
TYPE="OVSIntPort"
OVS_BRIDGE=br-ex
DEVICETYPE="ovs"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="yes"
IPV6INIT=no

[root@ip-192-169-142-157 network-scripts]# cat ifcfg-eth2
DEVICE="eth2"
# HWADDR=00:22:15:63:E4:E2
ONBOOT="yes"
TYPE="OVSPort"
DEVICETYPE="ovs"
OVS_BRIDGE=br-ex
NM_CONTROLLED=no
IPV6INIT=no

*********************************
Switch to network service
*********************************
# chkconfig network on
# systemctl stop NetworkManager
# systemctl disable NetworkManager
# reboot


******************
Network Node
******************
[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns
snat-00223343-b771-4b7a-bbc1-10c5fe924a12
qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12
qdhcp-3371ea3f-35f5-418c-8d07-82a2a54b5c1d

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec snat-00223343-b771-4b7a-bbc1-10c5fe924a12 ip a |grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 70.0.0.13/24 brd 70.0.0.255 scope global sg-67571326-46
    inet 172.24.4.236/28 brd 172.24.4.239 scope global qg-57d45794-46

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec snat-00223343-b771-4b7a-bbc1-10c5fe924a12 iptables-save | grep SNAT
-A neutron-l3-agent-snat -o qg-57d45794-46 -j SNAT --to-source 172.24.4.236
-A neutron-l3-agent-snat -m mark ! --mark 0x2/0xffff -m conntrack --ctstate DNAT -j SNAT --to-source 172.24.4.236

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip a |grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 70.0.0.1/24 brd 70.0.0.255 scope global qr-bdd297b1-05

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip rule ls
0:    from all lookup local
32766:    from all lookup main
32767:    from all lookup default
1174405121:    from 70.0.0.1/24 lookup 1174405121

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip route show table all
default via 70.0.0.13 dev qr-bdd297b1-05  table 1174405121
70.0.0.0/24 dev qr-bdd297b1-05  proto kernel  scope link  src 70.0.0.1
broadcast 70.0.0.0 dev qr-bdd297b1-05  table local  proto kernel  scope link  src 70.0.0.1
local 70.0.0.1 dev qr-bdd297b1-05  table local  proto kernel  scope host  src 70.0.0.1
broadcast 70.0.0.255 dev qr-bdd297b1-05  table local  proto kernel  scope link  src 70.0.0.1

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-bdd297b1-05: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 70.0.0.1  netmask 255.255.255.0  broadcast 70.0.0.255
        inet6 fe80::f816:3eff:fedf:c80b  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:df:c8:0b  txqueuelen 0  (Ethernet)
        RX packets 19  bytes 1530 (1.4 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 864 (864.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec snat-00223343-b771-4b7a-bbc1-10c5fe924a12 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-57d45794-46: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.24.4.236  netmask 255.255.255.240  broadcast 172.24.4.239
        inet6 fe80::f816:3eff:fec7:1583  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:c7:15:83  txqueuelen 0  (Ethernet)
        RX packets 25  bytes 1698 (1.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 13  bytes 1074 (1.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

sg-67571326-46: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 70.0.0.13  netmask 255.255.255.0  broadcast 70.0.0.255
        inet6 fe80::f816:3eff:fed1:69b4  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:d1:69:b4  txqueuelen 0  (Ethernet)
        RX packets 11  bytes 914 (914.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14  bytes 1140 (1.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0





   Neutron agents running on Network Node 
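The same can be cross-checked via CLI from any node with admin credentials; a sketch:

# neutron agent-list | grep 192-169-142-147     <== expect the L3 (dvr_snat), DHCP, metadata and OVS agents alive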



******************************************************************************
neutron.conf should be the same on the Controller and Network nodes
******************************************************************************

[root@ip-192-169-142-147 neutron(keystone_admin)]# cat neutron.conf | grep -v ^#|grep -v ^$
[DEFAULT]
verbose = True
router_distributed = True
debug = False
state_path = /var/lib/neutron
use_syslog = False
use_stderr = True
log_dir =/var/log/neutron
bind_host = 0.0.0.0
bind_port = 9696
core_plugin =neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins =router
auth_strategy = keystone
base_mac = fa:16:3e:00:00:00
dvr_base_mac = fa:16:3f:00:00:00
mac_generation_retries = 16
dhcp_lease_duration = 86400
dhcp_agent_notification = True
allow_bulk = True
allow_pagination = False
allow_sorting = False
allow_overlapping_ips = True
advertise_mtu = False
dhcp_agents_per_network = 1
use_ssl = False
rpc_response_timeout=60
rpc_backend=rabbit
control_exchange=neutron
lock_path=/var/lib/neutron/lock
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = 30
[keystone_authtoken]
auth_uri = http://192.169.142.127:5000/v2.0
identity_uri = http://192.169.142.127:35357
admin_tenant_name = services
admin_user = neutron
admin_password = 808e36e154bd4cee
[database]
[nova]
[oslo_concurrency]
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
kombu_reconnect_delay = 1.0
rabbit_host = 192.169.142.127
rabbit_port = 5672
rabbit_hosts = 192.169.142.127:5672
rabbit_use_ssl = False
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_ha_queues = False
heartbeat_rate=2
heartbeat_timeout_threshold=0
[qos]
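The DVR-specific knobs above are router_distributed = True and dvr_base_mac. A quick in-place verification, a sketch assuming crudini is available (packstack-based deployments ship it):

# crudini --get /etc/neutron/neutron.conf DEFAULT router_distributed
# crudini --get /etc/neutron/neutron.conf DEFAULT dvr_base_mac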

[root@ip-192-169-142-147 neutron(keystone_admin)]# cat l3_agent.ini | grep -v ^#|grep -v ^$
[DEFAULT]
debug = False
interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver
handle_internal_only_routers = True
external_network_bridge = br-ex
metadata_port = 9697
send_arp_for_ha = 3
periodic_interval = 40
periodic_fuzzy_delay = 5
enable_metadata_proxy = True
router_delete_namespaces = False
# Set for Network Node
agent_mode = dvr_snat
[AGENT]

***********************************************************************
The next files are supposed to be replicated to all compute nodes
***********************************************************************

[root@ip-192-169-142-147 neutron(keystone_admin)]# cat metadata_agent.ini | grep -v ^#|grep -v ^$

[DEFAULT]
debug = False
auth_url = http://192.169.142.127:5000/v2.0
auth_region = RegionOne
auth_insecure = False
admin_tenant_name = services
admin_user = neutron
admin_password = 808e36e154bd4cee
nova_metadata_ip = 192.169.142.127
nova_metadata_port = 8775
nova_metadata_protocol = http
metadata_proxy_shared_secret =a965cd23ed2f4502
metadata_workers =4
metadata_backlog = 4096
cache_url = memory://?default_ttl=5
[AGENT]

[root@ip-192-169-142-147 ml2(keystone_admin)]# cat ml2_conf.ini | grep -v ^#|grep -v ^$
[ml2]
type_drivers = vxlan
tenant_network_types = vxlan
mechanism_drivers =openvswitch,l2population
path_mtu = 0
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges =1001:2000
vxlan_group =239.1.1.2
[ml2_type_geneve]
[securitygroup]
enable_security_group = True
[agent]
l2_population=True 


[root@ip-192-169-142-147 ml2(keystone_admin)]# cat openvswitch_agent.ini | grep -v ^#|grep -v ^$
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.147  <== updated correspondingly on each node
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = True
arp_responder = True

prevent_arp_spoofing = True
enable_distributed_routing = True
drop_flows_on_start=False
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
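After any change to the agent configs above, the corresponding agents need a restart; a sketch for the Network Node:

# systemctl restart neutron-openvswitch-agent
# systemctl restart neutron-l3-agent
# systemctl restart neutron-metadata-agent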

******************************
On Compute Node
******************************

[root@ip-192-169-142-137 neutron]# cat neutron.conf | grep -v ^#|grep -v ^$
[DEFAULT]
verbose = True
debug = False
state_path = /var/lib/neutron
use_syslog = False
use_stderr = True
log_dir =/var/log/neutron
bind_host = 0.0.0.0
bind_port = 9696
core_plugin =neutron.plugins.ml2.plugin.Ml2Plugin
service_plugins =router
auth_strategy = keystone
base_mac = fa:16:3e:00:00:00
mac_generation_retries = 16
dhcp_lease_duration = 86400
dhcp_agent_notification = True
allow_bulk = True
allow_pagination = False
allow_sorting = False
allow_overlapping_ips = True
advertise_mtu = False
dhcp_agents_per_network = 1
use_ssl = False
rpc_response_timeout=60
rpc_backend=rabbit
control_exchange=neutron
lock_path=/var/lib/neutron/lock
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = 30
[keystone_authtoken]
auth_uri = http://127.0.0.1:35357/v2.0/
identity_uri = http://127.0.0.1:5000
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
[database]
[nova]
[oslo_concurrency]
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
kombu_reconnect_delay = 1.0
rabbit_host = 192.169.142.127
rabbit_port = 5672
rabbit_hosts = 192.169.142.127:5672
rabbit_use_ssl = False
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_ha_queues = False
heartbeat_rate=2
heartbeat_timeout_threshold=0
[qos]

[root@ip-192-169-142-137 neutron]# cat l3_agent.ini | grep -v ^#|grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Set for Compute Node
agent_mode = dvr
[AGENT]

**********************************************************************************
On each Compute node neutron-l3-agent and neutron-metadata-agent are
supposed to be started.
**********************************************************************************
# yum install  openstack-neutron-ml2  
# systemctl start neutron-l3-agent
# systemctl start neutron-metadata-agent
# systemctl enable neutron-l3-agent
# systemctl enable neutron-metadata-agent


[root@ip-192-169-142-137 ml2]# cat ml2_conf.ini | grep -v ^#|grep -v ^$
[ml2]
type_drivers = vxlan
tenant_network_types = vxlan
mechanism_drivers =openvswitch,l2population
path_mtu = 0
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges =1001:2000
vxlan_group =239.1.1.2
[ml2_type_geneve]
[securitygroup]
enable_security_group = True
[agent]
l2_population=True 


[root@ip-192-169-142-137 ml2]# cat openvswitch_agent.ini | grep -v ^#|grep -v ^$
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip =10.0.0.137
bridge_mappings =physnet1:br-ex
enable_tunneling=True
[agent]
polling_interval = 2
tunnel_types =vxlan
vxlan_udp_port =4789
l2_population = True
arp_responder = True

prevent_arp_spoofing = True
enable_distributed_routing = True
drop_flows_on_start=False
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

***********************
Compute Node
***********************

[root@ip-192-169-142-157 ~]# ip netns
fip-115edb73-ebe2-4e48-811f-4823fc19d9b6
qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip a | grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 169.254.31.28/31 scope global rfp-00223343-b
    inet 172.24.4.231/32 brd 172.24.4.231 scope global rfp-00223343-b
    inet 172.24.4.233/32 brd 172.24.4.233 scope global rfp-00223343-b
    inet 70.0.0.1/24 brd 70.0.0.255 scope global qr-bdd297b1-05

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 iptables-save -t nat | grep "^-A"|grep l3-agent
-A PREROUTING -j neutron-l3-agent-PREROUTING
-A OUTPUT -j neutron-l3-agent-OUTPUT
-A POSTROUTING -j neutron-l3-agent-POSTROUTING
-A neutron-l3-agent-OUTPUT -d 172.24.4.231/32 -j DNAT --to-destination 70.0.0.15
-A neutron-l3-agent-OUTPUT -d 172.24.4.233/32 -j DNAT --to-destination 70.0.0.17
-A neutron-l3-agent-POSTROUTING ! -i rfp-00223343-b ! -o rfp-00223343-b -m conntrack ! --ctstate DNAT -j ACCEPT
-A neutron-l3-agent-PREROUTING -d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697
-A neutron-l3-agent-PREROUTING -d 172.24.4.231/32 -j DNAT --to-destination 70.0.0.15
-A neutron-l3-agent-PREROUTING -d 172.24.4.233/32 -j DNAT --to-destination 70.0.0.17
-A neutron-l3-agent-float-snat -s 70.0.0.15/32 -j SNAT --to-source 172.24.4.231
-A neutron-l3-agent-float-snat -s 70.0.0.17/32 -j SNAT --to-source 172.24.4.233
-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat
-A neutron-postrouting-bottom -m comment --comment "Perform source NAT on outgoing traffic." -j neutron-l3-agent-snat
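Each DNAT/SNAT pair above corresponds to a floating IP known to Neutron; a quick cross-check from the controller (a sketch):

# neutron floatingip-list     <== expect 172.24.4.231 <-> 70.0.0.15 and 172.24.4.233 <-> 70.0.0.17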

[root@ip-192-169-142-157 ~]# ip netns exec  fip-115edb73-ebe2-4e48-811f-4823fc19d9b6  ip a | grep "inet "
    inet 127.0.0.1/8 scope host lo
    inet 169.254.31.29/31 scope global fpr-00223343-b
    inet 172.24.4.237/28 brd 172.24.4.239 scope global fg-d00d8427-25

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip rule ls
0:    from all lookup local
32766:    from all lookup main
32767:    from all lookup default
57480:    from 70.0.0.17 lookup 16
57481:    from 70.0.0.15 lookup 16
1174405121:    from 70.0.0.1/24 lookup 1174405121

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip route show table 16
default via 169.254.31.29 dev rfp-00223343-b

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip route
70.0.0.0/24 dev qr-bdd297b1-05  proto kernel  scope link  src 70.0.0.1
169.254.31.28/31 dev rfp-00223343-b  proto kernel  scope link  src 169.254.31.28

[root@ip-192-169-142-157 ~]# ip netns exec  fip-115edb73-ebe2-4e48-811f-4823fc19d9b6 ip route
default via 172.24.4.225 dev fg-d00d8427-25
169.254.31.28/31 dev fpr-00223343-b  proto kernel  scope link  src 169.254.31.29
172.24.4.224/28 dev fg-d00d8427-25  proto kernel  scope link  src 172.24.4.237
172.24.4.231 via 169.254.31.28 dev fpr-00223343-b
172.24.4.233 via 169.254.31.28 dev fpr-00223343-b
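The fip namespace reaches each floating IP through the fpr-/rfp- veth pair toward the qrouter namespace; this can be sanity-checked in place. A sketch (addresses from the output above; assumes the instance's security group permits ICMP):

# ip netns exec fip-115edb73-ebe2-4e48-811f-4823fc19d9b6 ping -c 2 172.24.4.231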

[root@ip-192-169-142-157 ~]# ip netns exec  fip-115edb73-ebe2-4e48-811f-4823fc19d9b6 ifconfig
fg-d00d8427-25: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.24.4.237  netmask 255.255.255.240  broadcast 172.24.4.239
        inet6 fe80::f816:3eff:fe10:3928  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:10:39:28  txqueuelen 0  (Ethernet)
        RX packets 46  bytes 4382 (4.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 16  bytes 1116 (1.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

fpr-00223343-b: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 169.254.31.29  netmask 255.255.255.254  broadcast 0.0.0.0
        inet6 fe80::d88d:7ff:fe1c:23a5  prefixlen 64  scopeid 0x20<link>
        ether da:8d:07:1c:23:a5  txqueuelen 1000  (Ethernet)
        RX packets 7  bytes 738 (738.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 7  bytes 738 (738.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@ip-192-169-142-157 ~]# ip netns exec  qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-bdd297b1-05: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 70.0.0.1  netmask 255.255.255.0  broadcast 70.0.0.255
        inet6 fe80::f816:3eff:fedf:c80b  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:df:c8:0b  txqueuelen 0  (Ethernet)
        RX packets 9  bytes 746 (746.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 864 (864.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

rfp-00223343-b: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 169.254.31.28  netmask 255.255.255.254  broadcast 0.0.0.0
        inet6 fe80::5c77:1eff:fe6b:5a21  prefixlen 64  scopeid 0x20<link>
        ether 5e:77:1e:6b:5a:21  txqueuelen 1000  (Ethernet)
        RX packets 7  bytes 738 (738.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 7  bytes 738 (738.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

***********************
Network Node
***********************

[root@ip-192-169-142-147 ~(keystone_admin)]# ovs-vsctl show
738cdbf4-4dde-4887-a95e-cc994702138e
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port phy-br-ex
            Interface phy-br-ex
                type: patch
                options: {peer=int-br-ex}
        Port "eth2"
            Interface "eth2"
        Port "qg-57d45794-46"
            Interface "qg-57d45794-46"
                type: internal
    Bridge br-tun
        fail_mode: secure
        Port "vxlan-0a000089"
            Interface "vxlan-0a000089"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.137"}
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port "vxlan-0a00009d"
            Interface "vxlan-0a00009d"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="10.0.0.147", out_key=flow, remote_ip="10.0.0.157"}
    Bridge br-int
        fail_mode: secure
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-bdd297b1-05"        
            tag: 1
            Interface "qr-bdd297b1-05"

                type: internal
        Port "sg-67571326-46"           <=========
            tag: 1
            Interface "sg-67571326-46"
                type: internal

        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
        Port br-int
            Interface br-int
                type: internal
        Port "tap06dd3fa7-c0"
            tag: 1
            Interface "tap06dd3fa7-c0"
                type: internal
    ovs_version: "2.4.0"



***********************
SNAT forwarding
***********************

==== Compute Node ====

[root@ip-192-169-142-157 ~]# ip netns
fip-115edb73-ebe2-4e48-811f-4823fc19d9b6
qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12

[root@ip-192-169-142-157 ~]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12  ip rule ls
0:    from all lookup local
32766:    from all lookup main
32767:    from all lookup default
57480:    from 70.0.0.17 lookup 16
57481:    from 70.0.0.15 lookup 16
1174405121:    from 70.0.0.1/24 lookup 1174405121

[root@ip-192-169-142-157 ~]# ip netns exec qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12 ip route show table all

default via 70.0.0.13 dev qr-bdd297b1-05  table 1174405121 <====
default via 169.254.31.29 dev rfp-00223343-b  table 16
70.0.0.0/24 dev qr-bdd297b1-05  proto kernel  scope link  src 70.0.0.1
169.254.31.28/31 dev rfp-00223343-b  proto kernel  scope link  src 169.254.31.28 


====Network Node  ====

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns
snat-00223343-b771-4b7a-bbc1-10c5fe924a12
qrouter-00223343-b771-4b7a-bbc1-10c5fe924a12
qdhcp-3371ea3f-35f5-418c-8d07-82a2a54b5c1d

[root@ip-192-169-142-147 ~(keystone_admin)]# ip netns exec snat-00223343-b771-4b7a-bbc1-10c5fe924a12 ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-57d45794-46: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.24.4.236  netmask 255.255.255.240  broadcast 172.24.4.239
        inet6 fe80::f816:3eff:fec7:1583  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:c7:15:83  txqueuelen 0  (Ethernet)
        RX packets 49  bytes 4463 (4.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 13  bytes 1074 (1.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

sg-67571326-46: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 70.0.0.13  netmask 255.255.255.0  broadcast 70.0.0.255
        inet6 fe80::f816:3eff:fed1:69b4  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:d1:69:b4  txqueuelen 0  (Ethernet)
        RX packets 11  bytes 914 (914.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14  bytes 1140 (1.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
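To confirm that traffic from a fixed-IP-only VM really leaves through the network node, outbound packets can be watched on the SNAT namespace's gateway port; a sketch (namespace and port names from this deployment):

# ip netns exec snat-00223343-b771-4b7a-bbc1-10c5fe924a12 tcpdump -ni qg-57d45794-46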


   On Compute node

   On Network node

  


   *********************************************************************
   SNAT: a sample VM with no FIP downloading data from the Internet;
   `iftop -i eth2` snapshot on the Network Node.
   *********************************************************************

   Download running on a VM with FIP, hosted on 192.169.142.157

   Download running on a VM with FIP, hosted on 192.169.142.137
   


 System information