Upgrading OpenStack: Havana to Icehouse

Taking a backup of all databases
root@ctrl01:~# mysqldump -u root -pPassword123 nova > Havana-nova.sql ; mysqldump -u root -pPassword123 keystone > Havana-keystone.sql ; mysqldump -u root -pPassword123 glance > Havana-glance.sql ; mysqldump -u root -pPassword123 quantum > Havana-quantum.sql ; mysqldump -u root -pPassword123 cinder > Havana-cinder.sql
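
Before touching the packages, it is worth confirming the dumps are complete; a finished mysqldump ends with a "-- Dump completed" marker:
root@ctrl01:~# ls -lh Havana-*.sql
root@ctrl01:~# tail -n1 Havana-nova.sql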

Remove the old repository and add the new one on all nodes
# apt-add-repository -r cloud-archive:havana ; apt-add-repository cloud-archive:icehouse
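
To confirm the switch took effect, check which version apt now offers for a core package (Icehouse packages carry a 1:2014.1.x version string):
# apt-get update
# apt-cache policy keystone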

Disable Compute file injection (optional)
root@compute-01:~# vim /etc/nova/nova-compute.conf
root@compute-01:~# cat /etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
libvirt_ovs_bridge=br-int
libvirt_vif_type=ethernet
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
libvirt_use_virtio_for_bridges=True
libvirt_cpu_mode=host-passthrough
libvirt_inject_partition = -2

Update and Upgrade all Nodes
# apt-get update ; apt-get dist-upgrade

Upgrading the services

Keystone
root@ctrl01:~# vi /etc/keystone/keystone.conf
[database]
connection = mysql://keystone:password@172.24.1.60/keystone

root@ctrl02:~# vi /etc/keystone/keystone.conf
[database]
connection = mysql://keystone:password@172.24.1.60/keystone

root@ctrl02:/etc/init.d# service keystone stop

root@ctrl01:~# service keystone stop
root@ctrl01:~# keystone-manage token_flush
root@ctrl01:~# keystone-manage db_sync
root@ctrl01:~# service keystone start
root@ctrl01:~# keystone user-list
root@ctrl01:~# keystone --version
0.7.1

Glance
root@glance-01:~# mysql -u root -pPassword123 -h 172.24.1.60 <<EOF
use glance;
SET foreign_key_checks = 0;
ALTER TABLE glance.image_locations CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_members CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_properties CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_tags CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.images CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.migrate_version CONVERT TO CHARACTER SET utf8;
SET foreign_key_checks = 1;
exit
EOF
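
To verify the conversion, list the table collations; every glance table should now report a utf8 collation:
root@glance-01:~# mysql -u root -pPassword123 -h 172.24.1.60 -e "SELECT table_name, table_collation FROM information_schema.tables WHERE table_schema = 'glance';"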

root@glance-01:~# vi /etc/glance/glance-api.conf
[DEFAULT]
rpc_backend = rabbit
rabbit_host = 172.24.1.60

[database]
connection = mysql://glance:password@172.24.1.60/glance
root@glance-01:~# vi /etc/glance/glance-registry.conf
[database]
connection = mysql://glance:password@172.24.1.60/glance

root@glance-01:~# service glance-api stop
root@glance-01:~# service glance-registry stop
root@glance-01:~# glance-manage db_sync
root@glance-01:~# service glance-api start
root@glance-01:~# service glance-registry start
root@glance-01:~# glance index

Controller services
root@ctrl01:~# vim /etc/nova/nova.conf
root@ctrl02:~# cat /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
rabbit_host = 172.24.1.60

root@ctrl01:~# vim /etc/nova/api-paste.ini
root@ctrl02:~# vim /etc/nova/api-paste.ini

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

root@ctrl02:/etc/init.d# service nova-api stop
root@ctrl02:/etc/init.d# service nova-scheduler stop
root@ctrl02:/etc/init.d# service nova-conductor stop
root@ctrl02:/etc/init.d# service nova-cert stop
root@ctrl02:/etc/init.d# service nova-consoleauth stop
root@ctrl02:/etc/init.d# service nova-novncproxy stop

root@ctrl01:~# service nova-api stop
root@ctrl01:~# service nova-scheduler stop
root@ctrl01:~# service nova-conductor stop
root@ctrl01:~# service nova-cert stop
root@ctrl01:~# service nova-consoleauth stop
root@ctrl01:~# service nova-novncproxy stop
root@ctrl01:~# nova-manage db sync

root@ctrl01:~# service nova-api start
root@ctrl01:~# service nova-scheduler start
root@ctrl01:~# service nova-conductor start
root@ctrl01:~# service nova-cert start
root@ctrl01:~# service nova-consoleauth start
root@ctrl01:~# service nova-novncproxy start

root@ctrl02:/etc/init.d# service nova-api start
root@ctrl02:/etc/init.d# service nova-scheduler start
root@ctrl02:/etc/init.d# service nova-conductor start
root@ctrl02:/etc/init.d# service nova-cert start
root@ctrl02:/etc/init.d# service nova-consoleauth start
root@ctrl02:/etc/init.d# service nova-novncproxy start
root@ctrl01:~# nova-manage service list
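
In the service list output, a healthy service shows :-) in the State column; XXX means the service has stopped checking in and should be investigated before moving on.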

Neutron
root@ctrl01:~# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = local,gre
tenant_network_types = gre
mechanism_drivers = openvswitch

[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000

[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True

root@ctrl01:~# scp /etc/neutron/plugins/ml2/ml2_conf.ini root@ctrl02:/etc/neutron/plugins/ml2/ml2_conf.ini

root@ctrl02:~# service neutron-server stop
root@ctrl01:~# service neutron-server stop

root@ctrl01:~# neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini stamp havana
root@ctrl01:~# neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade icehouse
root@ctrl01:~# python -m neutron.db.migration.migrate_to_ml2 openvswitch mysql://quantum:password@172.24.1.60/quantum
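
A quick sanity check that the ML2 migration populated the database (table names assumed from the Icehouse ML2 schema):
root@ctrl01:~# mysql -u root -pPassword123 -h 172.24.1.60 quantum -e "SHOW TABLES LIKE 'ml2%';"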

root@ctrl01:~# vim /etc/neutron/neutron.conf
core_plugin = ml2
service_plugins = router
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://172.24.1.60:8774/v2
nova_admin_username = nova
nova_admin_tenant_id = 8fd48ddfa46841bd859caf2db1c8a912
nova_admin_password = password
nova_admin_auth_url = http://172.24.1.60:35357/v2.0
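
The nova_admin_tenant_id above is deployment-specific; it can be looked up from the service tenant:
root@ctrl01:~# keystone tenant-list | awk '/ service / {print $2}'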

root@ctrl01:~# service neutron-server start
root@ctrl01:~# service neutron-server status
root@ctrl01:~# service neutron-ovs-cleanup restart
root@ctrl01:~# service neutron-dhcp-agent restart
root@ctrl01:~# service neutron-l3-agent restart
root@ctrl01:~# service neutron-metadata-agent restart
root@ctrl01:~# service neutron-plugin-openvswitch-agent restart

Cinder Upgrade
root@cinder-01:~# service cinder-api stop
root@cinder-01:~# service cinder-volume stop
root@cinder-01:~# service cinder-scheduler stop
root@cinder-01:~# cinder-manage db sync
root@cinder-01:~# service cinder-api start
root@cinder-01:~# service cinder-volume start
root@cinder-01:~# service cinder-scheduler start
root@cinder-01:~# cinder list

Dashboard
root@ctrl01:~# vim /etc/openstack-dashboard/local_settings.py
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_Member_"

root@ctrl01:~# service apache2 restart
root@ctrl01:~# scp /etc/openstack-dashboard/local_settings.py root@ctrl02:/etc/openstack-dashboard/local_settings.py

Compute nodes
root@compute-01:~# vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = local,gre
tenant_network_types = gre
mechanism_drivers = openvswitch

[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000

[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True

root@compute-01:~# service neutron-ovs-cleanup restart
root@compute-01:~# service neutron-plugin-openvswitch-agent restart
root@compute-01:~# service nova-compute restart

root@ctrl01:~# ps auxwww | grep neutron
root@ctrl01:~# kill -9 <PID_OF_Neutron>
root@ctrl01:~# cd /etc/init.d; for i in neutron-*; do service $i restart; done; cd
root@ctrl01:~# cd /etc/init.d; for i in nova-*; do service $i restart; done; cd

################ Trouble shooting steps  #########################
root@ctrl01:~# ip netns
qdhcp-a984c66c-b870-4b35-8db4-c49acddf51fe
qrouter-40ca56ad-605d-42d5-b049-041aad54b310
root@ctrl01:~# ip netns exec qdhcp-a984c66c-b870-4b35-8db4-c49acddf51fe ip a
19: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
21: tapbd1a6144-48: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
    link/ether fa:16:3e:d9:03:f4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.2/24 brd 192.168.100.255 scope global tapbd1a6144-48
    inet 169.254.169.254/16 brd 169.254.255.255 scope global tapbd1a6144-48
    inet6 fe80::f816:3eff:fed9:3f4/64 scope link
       valid_lft forever preferred_lft forever
root@ctrl01:~# ip netns exec qrouter-40ca56ad-605d-42d5-b049-041aad54b310 iptables -L -t nat | grep 169
REDIRECT   tcp  --  anywhere             169.254.169.254      tcp dpt:http redir ports 8775
root@ctrl01:~# curl http://172.24.1.60:8775
1.0
2007-01-19
2007-03-01
2007-08-29
2007-10-10
2007-12-15
2008-02-01
2008-09-01
2009-04-04
latest
root@ctrl01:~#
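
The same lookup can be exercised from inside the DHCP namespace (which, per the output above, holds 169.254.169.254 on its tap interface), assuming the metadata proxy is running in that namespace:
root@ctrl01:~# ip netns exec qdhcp-a984c66c-b870-4b35-8db4-c49acddf51fe curl -s http://169.254.169.254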

####
ovs-vsctl show
ovs-ofctl show br-int
ip netns list
ip netns exec qrouter-242bf8e7-ee89-46a6-9e33-fae4b5a80244 iptables -L -t nat | grep 169

dpkg-reconfigure openvswitch-datapath-dkms
###
keystone endpoint-create --region RegionOne --service-id=$(keystone service-list | awk '/ metering / {print $2}') --publicurl=http://172.24.1.60:8777 --internalurl=http://172.24.1.60:8777 --adminurl=http://172.24.1.60:8777

Upgrading OpenStack: Grizzly to Havana

Taking a backup of all databases
root@ctrl01:~# mysqldump -u root -pPassword123 nova > grizzly-nova.sql ; mysqldump -u root -pPassword123 keystone > grizzly-keystone.sql ; mysqldump -u root -pPassword123 glance > grizzly-glance.sql ; mysqldump -u root -pPassword123 quantum > grizzly-quantum.sql ; mysqldump -u root -pPassword123 cinder > grizzly-cinder.sql

Add the new repository and remove the old one
root@ctrl01:~# apt-add-repository -r cloud-archive:grizzly ; apt-add-repository cloud-archive:havana  (on all nodes)

Stamp the database as Grizzly
root@ctrl01:~# quantum-db-manage --config-file /etc/quantum/quantum.conf --config-file /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini stamp grizzly
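
Assuming the alembic current subcommand is available in this release, the stamp can be verified with:
root@ctrl01:~# quantum-db-manage --config-file /etc/quantum/quantum.conf --config-file /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini current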

Make the required changes
ctrl01
    vim /etc/nova/nova.conf
        add:
                enabled_apis=ec2,osapi_compute,metadata
                service_neutron_metadata_proxy = True
                neutron_metadata_proxy_shared_secret = helloOpenStack
                nova_metadata_port = 8775
                nova_metadata_ip = 172.24.1.60
                metadata_host=172.24.1.60
                metadata_port=8775
                metadata_listen=0.0.0.0
                metadata_listen_port=8775
                metadata_manager=nova.api.manager.MetadataManager
                network_api_class=nova.network.neutronv2.api.API
                neutron_url=http://172.24.1.60:9696
                neutron_auth_strategy=keystone
                neutron_admin_tenant_name=service
                neutron_admin_username=quantum
                neutron_admin_password=password
                neutron_admin_auth_url=http://172.24.1.60:35357/v2.0
                libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
                linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver    

                [database]
                connection = mysql://nova:password@172.24.1.60/nova

                [keystone_authtoken]
                auth_uri = http://172.24.1.60:5000/v2.0
                auth_host = 172.24.1.60
                auth_port = 35357
                auth_protocol = http
                admin_tenant_name = service
                admin_user = nova
                admin_password = password

                Remove:
                #service_quantum_metadata_proxy = True
                #quantum_metadata_proxy_shared_secret = helloOpenStack
                #firewall_driver=nova.virt.firewall.NoopFirewallDriver
                #security_group_api=quantum
                #network_api_class=nova.network.quantumv2.api.API
                #quantum_url=http://172.24.1.60:9696
                #quantum_auth_strategy=keystone
                #quantum_admin_tenant_name=service
                #quantum_admin_username=quantum
                #quantum_admin_password=password
                #quantum_admin_auth_url=http://172.24.1.60:35357/v2.0
                #libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
                #linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
                #sql_connection=mysql://nova:password@172.24.1.60/nova

/etc/cinder/cinder.conf
    add:
            [database]
            connection = mysql://cinder:password@172.24.1.60/cinder

            [keystone_authtoken]
            auth_uri = http://172.24.1.60:5000

    remove:
            #sql_connection = mysql://cinder:password@172.24.1.60/cinder

/etc/neutron/api-paste.ini
    add:
            [filter:authtoken]
                paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
                auth_host = 172.24.1.60
                auth_uri = http://172.24.1.60:5000
                auth_port = 35357
                auth_protocol = http
                admin_tenant_name = service
                admin_user = quantum
                admin_password = password

/etc/neutron/neutron.conf
        [database]
        sql_connection = mysql://quantum:password@172.24.1.60/quantum

/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini

Glance-01
/etc/glance/glance-api.conf
    add:
        [database]
        connection = mysql://glance:password@172.24.1.60/glance

        [keystone_authtoken]
        auth_port = 35357
        auth_protocol = http
        auth_uri = http://172.24.1.60:5000
        auth_host = 172.24.1.60
        admin_tenant_name = service
        admin_user = glance
        admin_password = password

/etc/glance/glance-api-paste.ini
        remove:
            auth_host = 172.24.1.60
            auth_port = 35357
            auth_protocol = http
            admin_tenant_name = service
            admin_user = glance
            admin_password = password

/etc/glance/glance-registry.conf
            add:
            [keystone_authtoken]
            auth_port = 35357
            auth_protocol = http
            auth_uri = http://172.24.1.60:5000
            auth_host = 172.24.1.60
            admin_tenant_name = service
            admin_user = glance
            admin_password = password

            [database]
            connection = mysql://glance:password@172.24.1.60/glance

/etc/glance/glance-registry-paste.ini
remove:
            auth_host = 172.24.1.60
            auth_port = 35357
            auth_protocol = http
            admin_tenant_name = service
            admin_user = glance
            admin_password = password

Upgrade the packages, accepting updated configuration files where required
root@ctrl01:~# apt-get update ; apt-get dist-upgrade (on all nodes)

Migrate the Neutron database to Havana
root@ctrl01:~# neutron-db-manage --config-file /etc/quantum/quantum.conf --config-file /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini upgrade havana

OpenStack Identity
root@ctrl01:~# service keystone stop
root@ctrl01:~# keystone-manage token_flush
root@ctrl01:~# keystone-manage db_sync
root@ctrl01:~# service keystone start
root@ctrl01:~# keystone user-list

OpenStack Image Service:
root@glance-01:~# mysql -u root -pPassword123 -h 172.24.1.60 <<EOF
use glance;
SET foreign_key_checks = 0;
ALTER TABLE glance.image_locations CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_members CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_properties CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.image_tags CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.images CONVERT TO CHARACTER SET utf8;
ALTER TABLE glance.migrate_version CONVERT TO CHARACTER SET utf8;
SET foreign_key_checks = 1;
exit
EOF

root@glance-01:~# service glance-api stop
root@glance-01:~# service glance-registry stop
root@glance-01:~# glance-manage db_sync
root@glance-01:~# service glance-api start
root@glance-01:~# service glance-registry start
root@glance-01:~# glance index

OpenStack Compute
root@ctrl01:~# service nova-api stop
root@ctrl01:~# service nova-scheduler stop
root@ctrl01:~# service nova-conductor stop
root@ctrl01:~# service nova-cert stop
root@ctrl01:~# service nova-consoleauth stop
root@ctrl01:~# service nova-novncproxy stop
root@ctrl01:~# nova-manage db sync
root@ctrl01:~# service nova-api start
root@ctrl01:~# service nova-scheduler start
root@ctrl01:~# service nova-conductor start
root@ctrl01:~# service nova-cert start
root@ctrl01:~# service nova-consoleauth start
root@ctrl01:~# service nova-novncproxy start
root@ctrl01:~# nova-manage service list

OpenStack Block Storage
root@cinder-01:~# service cinder-api stop
root@cinder-01:~# service cinder-scheduler stop
root@cinder-01:~# service cinder-volume stop
root@cinder-01:~# cinder-manage db sync
root@cinder-01:~# service cinder-api start
root@cinder-01:~# service cinder-scheduler start
root@cinder-01:~# service cinder-volume start

root@ctrl01:~# ps auxwww | grep quantum

(Kill all remaining quantum processes)
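
For example, a one-liner that finds and kills any leftover quantum processes:
root@ctrl01:~# ps auxwww | grep "[q]uantum" | awk '{print $2}' | xargs -r kill -9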

High Availability and Load Balancing in OpenStack

HA

Services Distribution

3 Controller Nodes = Nova API, Nova Conductor, Nova Consoleauth, Nova Scheduler, Neutron Server, Neutron OVS Plugin, Neutron DHCP Agent, Keystone, RabbitMQ Server, Glance API/Registry

15 Compute Nodes = Nova Compute, Neutron OVS Agent

2 Load-Balancer Nodes = HAProxy, Keepalived

2 DB Nodes = MySQL Server, Galera

2 Storage Nodes = Cinder API, Cinder Scheduler, Cinder Volume, TGT services

Network Topology

[Topology diagram]
Post Installation Configuration

Network Configuration
IP Address Configuration

server lb01
[cloud@lb01]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.11
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@lb01]# sudo /etc/init.d/networking restart

server lb02
[cloud@lb02]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.12
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@lb02]# sudo /etc/init.d/networking restart

server db01
[cloud@db01]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.21
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@db01]# sudo /etc/init.d/networking restart

server db02
[cloud@db02]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.22
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@db02]# sudo /etc/init.d/networking restart

server sn01
[cloud@sn01]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.31
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@sn01]# sudo /etc/init.d/networking restart

server sn02
[cloud@sn02]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.32
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

[cloud@sn02]# sudo /etc/init.d/networking restart

server ctrl01
[cloud@ctrl01]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.41
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@ctrl01]# sudo /etc/init.d/networking restart

server ctrl02
[cloud@ctrl02]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.42
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@ctrl02]# sudo /etc/init.d/networking restart

server ctrl03
[cloud@ctrl03]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.43
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@ctrl03]# sudo /etc/init.d/networking restart

server wrk01
[cloud@wrk01]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.51
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@wrk01]# sudo /etc/init.d/networking restart

server wrk02
[cloud@wrk02]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.52
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@wrk02]# sudo /etc/init.d/networking restart

server wrk03
[cloud@wrk03]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.53
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@wrk03]# sudo /etc/init.d/networking restart

server wrk04
[cloud@wrk04]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.54
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@wrk04]# sudo /etc/init.d/networking restart

server wrk05
[cloud@wrk05]# sudo vim  /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet static
address 192.168.0.55
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255
gateway 192.168.0.1
dns-nameservers 192.168.0.254
dns-search mycloud.com

# Public Network: Bridged Interface
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ifconfig $IFACE 0.0.0.0 down

[cloud@wrk05]# sudo /etc/init.d/networking restart

Host File Configuration
server lb01
[cloud@lb01]# sudo vim /etc/hosts
192.168.0.11    lb01.mycloud.com    lb01
192.168.0.12    lb02.mycloud.com    lb02

192.168.0.21    db01.mycloud.com    db01
192.168.0.22    db02.mycloud.com    db02

192.168.0.31    sn01.mycloud.com    sn01
192.168.0.32    sn02.mycloud.com    sn02

192.168.0.41    ctrl01.mycloud.com    ctrl01
192.168.0.42    ctrl02.mycloud.com    ctrl02
192.168.0.43    ctrl03.mycloud.com    ctrl03

192.168.0.51    wrk01.mycloud.com    wrk01
192.168.0.52    wrk02.mycloud.com    wrk02
192.168.0.53    wrk03.mycloud.com    wrk03
192.168.0.54    wrk04.mycloud.com    wrk04
192.168.0.55    wrk05.mycloud.com    wrk05

[cloud@lb01]# scp /etc/hosts lb02:/etc/hosts
[cloud@lb01]# scp /etc/hosts db01:/etc/hosts
[cloud@lb01]# scp /etc/hosts db02:/etc/hosts
[cloud@lb01]# scp /etc/hosts sn01:/etc/hosts
[cloud@lb01]# scp /etc/hosts sn02:/etc/hosts
[cloud@lb01]# scp /etc/hosts ctrl01:/etc/hosts
[cloud@lb01]# scp /etc/hosts ctrl02:/etc/hosts
[cloud@lb01]# scp /etc/hosts ctrl03:/etc/hosts
[cloud@lb01]# scp /etc/hosts wrk01:/etc/hosts
[cloud@lb01]# scp /etc/hosts wrk02:/etc/hosts
[cloud@lb01]# scp /etc/hosts wrk03:/etc/hosts
[cloud@lb01]# scp /etc/hosts wrk04:/etc/hosts
[cloud@lb01]# scp /etc/hosts wrk05:/etc/hosts
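
The same distribution can be done in one loop:
[cloud@lb01]# for h in lb02 db01 db02 sn01 sn02 ctrl01 ctrl02 ctrl03 wrk01 wrk02 wrk03 wrk04 wrk05; do scp /etc/hosts $h:/etc/hosts; done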

Time Synchronization
Repeat the steps on all nodes
# sudo apt-get install -y ntp
# vi /etc/ntp.conf
server ns01.mycloud.com
# /etc/init.d/ntp restart
# ntpq -p
remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*ns01.mycloud.com       .GPS.            1 u  105  122  173   26.021    0.013   0.026

Repository Configuration
Repeat the steps on all nodes
# sudo su
# apt-get install python-software-properties
# add-apt-repository cloud-archive:havana
# apt-get update && apt-get dist-upgrade -y
# reboot

######################### Load Balancer Configuration ##########################

Load Balancer Configuration

Common steps on both lb01 and lb02
Allow daemons (HAProxy, keepalived) to bind to the virtual IP even when it is not currently assigned to the node:
# vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind=1
# sysctl -p

# apt-get install -y keepalived haproxy
# mkdir /var/lib/haproxy
# chown root:root /var/lib/haproxy/

###############################    Server lb01  #########################################
# vim /etc/keepalived/keepalived.conf
global_defs {
notification_email {
root@mycloud.com
}
notification_email_from keepalived@mycloud.com
smtp_server localhost
smtp_connect_timeout 30
router_id lb01
}
vrrp_script haproxy {
script   "killall -0 haproxy"
interval 2
weight   2
}
vrrp_instance 50 {
virtual_router_id 50

# Advert interval
advert_int 1

# for electing MASTER, highest priority wins.
priority  101
state     MASTER
interface eth0
virtual_ipaddress {
192.168.0.40 dev eth0
}
track_script {
haproxy
}
}

# vim /etc/haproxy/haproxy.cfg
global
chroot  /var/lib/haproxy
daemon
group  haproxy
log  192.168.0.11 local0
maxconn  4000
pidfile  /var/run/haproxy.pid
stats  socket /var/lib/haproxy/stats
user  haproxy

defaults
log  global
maxconn  8000
option  redispatch
retries  3
timeout  http-request 10s
timeout  queue 1m
timeout  connect 10s
timeout  client 1m
timeout  server 1m
timeout  check 10s

listen cinder_api_cluster
bind 192.168.0.40:8776
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8776  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8776  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8776  check inter 2000 rise 2 fall 5

listen dashboard_cluster
bind 192.168.0.40:80
balance  source
capture  cookie vgnvisitor= len 32
cookie  SERVERID insert indirect nocache
mode  http
option  forwardfor
option  httpchk
option  httpclose
rspidel  ^Set-cookie:\ IP=
server ctrl01 192.168.0.41:80 cookie control01 check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:80 cookie control02 check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:80 cookie control03 check inter 2000 rise 2 fall 5

listen galera_cluster
bind 192.168.0.40:3306
balance  source
mode  tcp
option  httpchk
server ctrl01 192.168.0.41:3306  check port 9200 inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:3306  check port 9200 inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:3306  check port 9200 inter 2000 rise 2 fall 5

listen glance_api_cluster
bind 192.168.0.40:9292
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:9292  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9292  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9292  check inter 2000 rise 2 fall 5

listen glance_registry_cluster
bind 192.168.0.40:9191
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:9191  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9191  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9191  check inter 2000 rise 2 fall 5

listen keystone_admin_cluster
bind 192.168.0.40:35357
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:35357  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:35357  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:35357  check inter 2000 rise 2 fall 5

listen keystone_public_internal_cluster
bind 192.168.0.40:5000
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:5000  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:5000  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:5000  check inter 2000 rise 2 fall 5

listen nova_ec2_api_cluster
bind 192.168.0.40:8773
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:8773  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8773  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8773  check inter 2000 rise 2 fall 5

listen nova_memcached_cluster
bind 192.168.0.40:11211
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:11211  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:11211  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:11211  check inter 2000 rise 2 fall 5

listen nova_metadata_api_cluster
bind 192.168.0.40:8775
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:8775  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8775  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8775  check inter 2000 rise 2 fall 5

listen nova_osapi_cluster
bind 192.168.0.40:8774
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8774  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8774  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8774  check inter 2000 rise 2 fall 5

listen novnc_cluster
bind 192.168.0.40:6080
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:6080  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:6080  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:6080  check inter 2000 rise 2 fall 5

listen neutron_api_cluster
bind 192.168.0.40:9696
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:9696  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9696  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9696  check inter 2000 rise 2 fall 5

listen heat_api_cluster
bind 192.168.0.40:8004
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8004  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8004  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8004  check inter 2000 rise 2 fall 5

listen heat_cf_api_cluster
bind 192.168.0.40:8000
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8000  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8000  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8000  check inter 2000 rise 2 fall 5

listen rabbit_cluster
bind 192.168.0.40:5672
balance  source
mode  tcp
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:5672  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:5672  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:5672  check inter 2000 rise 2 fall 5

# vim /etc/default/haproxy
ENABLED=1

# /etc/init.d/keepalived restart
# /etc/init.d/haproxy restart
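
Confirm lb01 now holds the virtual IP:
# ip addr show eth0 | grep 192.168.0.40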

################################   Server lb02   #####################################
# vim /etc/keepalived/keepalived.conf
global_defs {
notification_email {
root@mycloud.com
}
notification_email_from keepalived@mycloud.com
smtp_server localhost
smtp_connect_timeout 30
router_id lb02
}
vrrp_script haproxy {
script   "killall -0 haproxy"
interval 2
weight   2
}
vrrp_instance 50 {
virtual_router_id 50

# Advert interval
advert_int 1

# for electing MASTER, highest priority wins.
priority  100
state     BACKUP
interface eth0
virtual_ipaddress {
192.168.0.40 dev eth0
}
track_script {
haproxy
}
}

# vim  /etc/haproxy/haproxy.cfg

global
chroot  /var/lib/haproxy
daemon
group  haproxy
log  192.168.0.12 local0
maxconn  4000
pidfile  /var/run/haproxy.pid
stats  socket /var/lib/haproxy/stats
user  haproxy

defaults
log  global
maxconn  8000
option  redispatch
retries  3
timeout  http-request 10s
timeout  queue 1m
timeout  connect 10s
timeout  client 1m
timeout  server 1m
timeout  check 10s

listen cinder_api_cluster
bind 192.168.0.40:8776
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8776  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8776  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8776  check inter 2000 rise 2 fall 5

listen dashboard_cluster
bind 192.168.0.40:80
balance  source
capture  cookie vgnvisitor= len 32
cookie  SERVERID insert indirect nocache
mode  http
option  forwardfor
option  httpchk
option  httpclose
rspidel  ^Set-cookie:\ IP=
server ctrl01 192.168.0.41:80 cookie control01 check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:80 cookie control02 check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:80 cookie control03 check inter 2000 rise 2 fall 5

listen galera_cluster
bind 192.168.0.40:3306
balance  source
mode  tcp
option  httpchk
server ctrl01 192.168.0.41:3306  check port 9200 inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:3306  check port 9200 inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:3306  check port 9200 inter 2000 rise 2 fall 5

listen glance_api_cluster
bind 192.168.0.40:9292
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:9292  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9292  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9292  check inter 2000 rise 2 fall 5

listen glance_registry_cluster
bind 192.168.0.40:9191
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:9191  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9191  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9191  check inter 2000 rise 2 fall 5

listen keystone_admin_cluster
bind 192.168.0.40:35357
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:35357  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:35357  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:35357  check inter 2000 rise 2 fall 5

listen keystone_public_internal_cluster
bind 192.168.0.40:5000
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:5000  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:5000  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:5000  check inter 2000 rise 2 fall 5

listen nova_ec2_api_cluster
bind 192.168.0.40:8773
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:8773  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8773  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8773  check inter 2000 rise 2 fall 5

listen nova_memcached_cluster
bind 192.168.0.40:11211
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:11211  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:11211  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:11211  check inter 2000 rise 2 fall 5

listen nova_metadata_api_cluster
bind 192.168.0.40:8775
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:8775  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8775  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8775  check inter 2000 rise 2 fall 5

listen nova_osapi_cluster
bind 192.168.0.40:8774
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8774  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8774  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8774  check inter 2000 rise 2 fall 5

listen novnc_cluster
bind 192.168.0.40:6080
balance  source
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:6080  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:6080  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:6080  check inter 2000 rise 2 fall 5

listen neutron_api_cluster
bind 192.168.0.40:9696
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:9696  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:9696  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:9696  check inter 2000 rise 2 fall 5

listen heat_api_cluster
bind 192.168.0.40:8004
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8004  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8004  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8004  check inter 2000 rise 2 fall 5

listen heat_cf_api_cluster
bind 192.168.0.40:8000
balance  source
option  tcpka
option  httpchk
option  tcplog
server ctrl01 192.168.0.41:8000  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:8000  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:8000  check inter 2000 rise 2 fall 5

listen rabbit_cluster
bind 192.168.0.40:5672
balance  source
mode  tcp
option  tcpka
option  tcplog
server ctrl01 192.168.0.41:5672  check inter 2000 rise 2 fall 5
server ctrl02 192.168.0.42:5672  check inter 2000 rise 2 fall 5
server ctrl03 192.168.0.43:5672  check inter 2000 rise 2 fall 5

# vim /etc/default/haproxy
ENABLED=1

# /etc/init.d/keepalived restart
# /etc/init.d/haproxy restart
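
A simple failover test: stop HAProxy on lb01 and confirm the VIP moves to lb02 within a few seconds (the vrrp_script stops awarding lb01 its +2 weight, so lb02's effective priority wins), then restore it:
[cloud@lb01]# sudo service haproxy stop
[cloud@lb02]# ip addr show eth0 | grep 192.168.0.40
[cloud@lb01]# sudo service haproxy start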

#########################################################################
############################   Controller Node Installation ########################
#########################################################################

############################  RabbitMQ Installation  ############################

Install RabbitMQ Server on all Nodes
[root@ctrl01]# apt-get install -y rabbitmq-server
[root@ctrl02]# apt-get install -y rabbitmq-server
[root@ctrl03]# apt-get install -y rabbitmq-server

Stop RabbitMQ Server to enable clustering
[root@ctrl01]# service rabbitmq-server stop
[root@ctrl02]# service rabbitmq-server stop
[root@ctrl03]# service rabbitmq-server stop

Copy the Erlang cookie from ctrl01 to the other nodes
[root@ctrl01]# scp /var/lib/rabbitmq/.erlang.cookie root@192.168.0.42:/var/lib/rabbitmq/.erlang.cookie
[root@ctrl01]# scp /var/lib/rabbitmq/.erlang.cookie root@192.168.0.43:/var/lib/rabbitmq/.erlang.cookie
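
scp run as root leaves the cookie owned by root; RabbitMQ will refuse to start unless the rabbitmq user owns it with tight permissions:
[root@ctrl02]# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie; chmod 400 /var/lib/rabbitmq/.erlang.cookie
[root@ctrl03]# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie; chmod 400 /var/lib/rabbitmq/.erlang.cookie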

Start rabbitmq-server on all nodes
[root@ctrl01]# service rabbitmq-server start
[root@ctrl02]# service rabbitmq-server start
[root@ctrl03]# service rabbitmq-server start

Check status of rabbitmq-server on all nodes
[root@ctrl01]# service rabbitmq-server status
[root@ctrl02]# service rabbitmq-server status
[root@ctrl03]# service rabbitmq-server status

Now configure the RabbitMQ cluster

[root@ctrl02]# rabbitmqctl stop_app
[root@ctrl02]# rabbitmqctl cluster rabbit@ctrl01
[root@ctrl02]# rabbitmqctl start_app
[root@ctrl02]# rabbitmqctl cluster_status

[root@ctrl03]# rabbitmqctl stop_app
[root@ctrl03]# rabbitmqctl cluster rabbit@ctrl02
[root@ctrl03]# rabbitmqctl start_app
[root@ctrl03]# rabbitmqctl cluster_status

Hardening of RabbitMQ
[root@ctrl01]# rabbitmqctl delete_user guest
[root@ctrl01]# rabbitmqctl add_user rabbituser rabbitpassword
[root@ctrl01]# rabbitmqctl set_permissions -p / rabbituser ".*" ".*" ".*"
[root@ctrl01]# rabbitmqctl list_users
[root@ctrl01]# rabbitmqctl list_user_permissions rabbituser

#########################   Keystone Installation  #########################

Install keystone on all ctrl nodes
[root@ctrl01]# apt-get install -y keystone
[root@ctrl02]# apt-get install -y keystone
[root@ctrl03]# apt-get install -y keystone

[root@ctrl01]# rm /var/lib/keystone/keystone.db
[root@ctrl02]# rm /var/lib/keystone/keystone.db
[root@ctrl03]# rm /var/lib/keystone/keystone.db

MySQL DB Creation
[root@ctrl01]# mysql
CREATE DATABASE keystone;
GRANT ALL ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';
GRANT ALL ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';
quit;

[root@ctrl02]# mysql -e "show databases;"
[root@ctrl03]# mysql -e "show databases;"

[root@ctrl01]# vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = keystone
bind_host = 192.168.0.41
[sql]
connection = mysql://keystone:keystone@192.168.0.40/keystone
idle_timeout = 30
[token]
provider = keystone.token.providers.uuid.Provider

[root@ctrl02]# vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = keystone
bind_host = 192.168.0.42
[sql]
connection = mysql://keystone:keystone@192.168.0.40/keystone
idle_timeout = 30
[token]
provider = keystone.token.providers.uuid.Provider

[root@ctrl03]# vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = keystone
bind_host = 192.168.0.43
[sql]
connection = mysql://keystone:keystone@192.168.0.40/keystone
idle_timeout = 30
[token]
provider = keystone.token.providers.uuid.Provider

[root@ctrl01]#  vim /root/keystonerc
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=keystone
export OS_AUTH_URL="http://192.168.0.40:5000/v2.0/"
export OS_AUTH_STRATEGY=keystone
export SERVICE_TOKEN=keystone
export SERVICE_ENDPOINT=http://192.168.0.40:35357/v2.0/

[root@ctrl01]# source keystonerc

Verify that MySQL is listening on the VIP for the Keystone database
[root@ctrl01]# mysql -h192.168.0.40 -ukeystone -pkeystone keystone
[root@ctrl02]# mysql -h192.168.0.40 -ukeystone -pkeystone keystone
[root@ctrl03]# mysql -h192.168.0.40 -ukeystone -pkeystone keystone

[root@ctrl01]# service keystone restart
[root@ctrl02]# service keystone restart
[root@ctrl03]# service keystone restart

[root@ctrl01]# keystone-manage db_sync
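
The tenant/user/role commands below rely on a get_id shell helper that runs a keystone command and extracts the ID column from its table output; a minimal definition (adapted from keystone's sample-data script):
[root@ctrl01]# get_id () { "$@" | awk '/ id / { print $4 }'; }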

# Tenants
[root@ctrl01]# ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
[root@ctrl01]# SERVICE_TENANT=$(get_id keystone tenant-create --name=service)
[root@ctrl01]# DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
[root@ctrl01]# INVIS_TENANT=$(get_id keystone tenant-create --name=invis)

# Users
[root@ctrl01]# ADMIN_USER=$(get_id keystone user-create --name=admin --pass="admin" --email=admin@mycloud.com)
[root@ctrl01]# DEMO_USER=$(get_id keystone user-create --name=demo --pass="demo" --email=demo@mycloud.com)

# Roles
[root@ctrl01]# ADMIN_ROLE=$(get_id keystone role-create --name=admin)
[root@ctrl01]# KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
[root@ctrl01]# KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)

# Add Roles to Users in Tenants
[root@ctrl01]# keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
[root@ctrl01]# keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT
[root@ctrl01]# keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT
[root@ctrl01]# keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT

# The Member role is used by Horizon
[root@ctrl01]# MEMBER_ROLE=$(get_id keystone role-create --name=Member)
[root@ctrl01]# keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT
[root@ctrl01]# keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT

# Configure service users/roles
[root@ctrl01]# NOVA_USER=$(get_id keystone user-create --name=nova --pass=nova --tenant-id $SERVICE_TENANT --email=nova@mycloud.com)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE

[root@ctrl01]# GLANCE_USER=$(get_id keystone user-create --name=glance --pass=glance --tenant-id $SERVICE_TENANT --email=glance@mycloud.com)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE

[root@ctrl01]# RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $RESELLER_ROLE

[root@ctrl01]# NEUTRON_USER=$(get_id keystone user-create --name=neutron --pass=neutron --tenant-id $SERVICE_TENANT --email=neutron@mycloud.com)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NEUTRON_USER --role-id $ADMIN_ROLE

[root@ctrl01]# CINDER_USER=$(get_id keystone user-create --name=cinder --pass=cinder --tenant-id $SERVICE_TENANT --email=cinder@mycloud.com)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE

[root@ctrl01]# HEAT_USER=$(get_id keystone user-create --name=heat --pass=heat --tenant-id $SERVICE_TENANT --email=heat@mycloud.com)
[root@ctrl01]# keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $HEAT_USER --role-id $ADMIN_ROLE

######  Creating Services  ############
[root@ctrl01]# keystone service-create --name nova --type compute --description 'OpenStack Compute Service'
[root@ctrl01]# keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'
[root@ctrl01]# keystone service-create --name glance --type image --description 'OpenStack Image Service'
[root@ctrl01]# keystone service-create --name swift --type object-store --description 'OpenStack Storage Service'
[root@ctrl01]# keystone service-create --name keystone --type identity --description 'OpenStack Identity'
[root@ctrl01]# keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'
[root@ctrl01]# keystone service-create --name heat-cfn --type cloudformation --description 'Heat CloudFormation API'
[root@ctrl01]# keystone service-create --name heat --type orchestration --description 'Heat Orchestration API'
[root@ctrl01]# keystone service-create --name neutron --type network --description 'OpenStack Networking service'

######  Creating Endpoint  ############
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8774/v2/$(tenant_id)s' --adminurl 'http://192.168.0.40:8774/v2/$(tenant_id)s' --internalurl 'http://192.168.0.40:8774/v2/$(tenant_id)s'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8776/v1/$(tenant_id)s' --adminurl 'http://192.168.0.40:8776/v1/$(tenant_id)s' --internalurl 'http://192.168.0.40:8776/v1/$(tenant_id)s'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:9292/v2' --adminurl 'http://192.168.0.40:9292/v2' --internalurl 'http://192.168.0.40:9292/v2'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8080/v1/AUTH_$(tenant_id)s' --adminurl 'http://192.168.0.40:8080/v1' --internalurl 'http://192.168.0.40:8080/v1/AUTH_$(tenant_id)s'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:5000/v2.0' --adminurl 'http://192.168.0.40:35357/v2.0' --internalurl 'http://192.168.0.40:5000/v2.0'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8773/services/Cloud' --adminurl 'http://192.168.0.40:8773/services/Admin' --internalurl 'http://192.168.0.40:8773/services/Cloud'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8000/v1' --adminurl 'http://192.168.0.40:8000/v1' --internalurl 'http://192.168.0.40:8000/v1'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:8004/v1/$(tenant_id)s' --adminurl 'http://192.168.0.40:8004/v1/$(tenant_id)s' --internalurl 'http://192.168.0.40:8004/v1/$(tenant_id)s'
[root@ctrl01]# keystone endpoint-create --region RegionOne --service-id "id of service" --publicurl 'http://192.168.0.40:9696/' --adminurl 'http://192.168.0.40:9696/' --internalurl 'http://192.168.0.40:9696/'
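
"id of service" is a placeholder; each ID can be pulled from the service list by type, e.g. for the compute endpoint:
[root@ctrl01]# keystone service-list | awk '/ compute / {print $2}'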

########### Verify the Identity Service ###################
[root@ctrl01]# keystone --os-username=admin --os-password=admin --os-auth-url=http://192.168.0.40:35357/v2.0 token-get
[root@ctrl01]# keystone --os-username=admin --os-password=admin --os-tenant-name=admin --os-auth-url=http://192.168.0.40:35357/v2.0 token-get
[root@ctrl01]# source keystonerc
[root@ctrl01]# keystone user-list
[root@ctrl01]# keystone role-list
[root@ctrl01]# keystone service-list
[root@ctrl01]# keystone endpoint-list

###################################################################################
##########################  Glance Installation  ##########################################
##################################################################################
[root@ctrl01]# apt-get install -y glance-api glance-registry
[root@ctrl02]# apt-get install -y glance-api glance-registry
[root@ctrl03]# apt-get install -y glance-api glance-registry

[root@ctrl01]# rm /var/lib/glance/glance.sqlite
[root@ctrl02]# rm /var/lib/glance/glance.sqlite
[root@ctrl03]# rm /var/lib/glance/glance.sqlite

[root@ctrl01]# mysql
CREATE DATABASE glance;
GRANT ALL ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';
GRANT ALL ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';
quit;

## on ctrl01, ctrl02, ctrl03 ##
[root@ctrl01]# vim /etc/glance/glance-api.conf
[DEFAULT]
default_store = cinder
bind_host = 192.168.0.41
sql_connection=mysql://glance:glance@192.168.0.40/glance
sql_idle_timeout = 30
registry_host = 192.168.0.40

[keystone_authtoken]
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = glance

[paste_deploy]
flavor=keystone+cachemanagement

[root@ctrl01]# vim /etc/glance/glance-cache.conf
[DEFAULT]
registry_host = 192.168.0.40
auth_url = http://192.168.0.40:5000/v2.0/
admin_tenant_name = service
admin_user = glance
admin_password = glance

[root@ctrl01]# vim /etc/glance/glance-registry.conf
[DEFAULT]
bind_host = 192.168.0.41
sql_connection=mysql://glance:glance@192.168.0.40/glance
sql_idle_timeout = 30

[keystone_authtoken]
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = glance

[paste_deploy]
flavor=keystone

[root@ctrl01]# service glance-api restart; service glance-registry restart

########### no need to repeat on all nodes #########
[root@ctrl01]# glance-manage version_control 0
[root@ctrl01]# glance-manage db_sync
[root@ctrl01]# glance image-list
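
A quick functional test is to upload a small image; the CirrOS test image is a common choice (download URL assumed, and this assumes the configured default_store accepts uploads):
[root@ctrl01]# wget http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img
[root@ctrl01]# glance image-create --name cirros --disk-format qcow2 --container-format bare --is-public True < cirros-0.3.1-x86_64-disk.img
[root@ctrl01]# glance image-list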

######################################################################################
######################  Neutron Installation  ################################################
######################################################################################
[root@ctrl01]# apt-get install -y neutron-server neutron-plugin-openvswitch neutron-plugin-openvswitch-agent openvswitch-datapath-dkms neutron-dhcp-agent
[root@ctrl02]# apt-get install -y neutron-server neutron-plugin-openvswitch neutron-plugin-openvswitch-agent openvswitch-datapath-dkms neutron-dhcp-agent
[root@ctrl03]# apt-get install -y neutron-server neutron-plugin-openvswitch neutron-plugin-openvswitch-agent openvswitch-datapath-dkms neutron-dhcp-agent

[root@ctrl01]# mysql
CREATE DATABASE neutron;
GRANT ALL ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';
GRANT ALL ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
quit;

[root@ctrl01]# service openvswitch-switch status
[root@ctrl02]# service openvswitch-switch status
[root@ctrl03]# service openvswitch-switch status

[root@ctrl01]# service openvswitch-switch start
[root@ctrl02]# service openvswitch-switch start
[root@ctrl03]# service openvswitch-switch start

[root@ctrl01]# ovs-vsctl add-br br-int
[root@ctrl01]# ovs-vsctl add-br br-ex
[root@ctrl01]# ovs-vsctl add-port br-ex eth1

[root@ctrl02]# ovs-vsctl add-br br-int
[root@ctrl02]# ovs-vsctl add-br br-ex
[root@ctrl02]# ovs-vsctl add-port br-ex eth1

[root@ctrl03]# ovs-vsctl add-br br-int
[root@ctrl03]# ovs-vsctl add-br br-ex
[root@ctrl03]# ovs-vsctl add-port br-ex eth1

## on all control nodes ##
[root@ctrl01]# vim /etc/neutron/neutron.conf
[DEFAULT]
bind_host = 192.168.0.41
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672

[keystone_authtoken]
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = neutron
signing_dir = /var/lib/neutron/keystone-signing

[database]
sql_connection=mysql://neutron:neutron@192.168.0.40/neutron

[root@ctrl01]# vim /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
[ovs]
tenant_network_type = vlan
network_vlan_ranges = physnet1:223:225
bridge_mappings = physnet1:br-ex

[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

[root@ctrl01]# vim /etc/neutron/dhcp_agent.ini
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

[root@ctrl01]# service neutron-server restart; service neutron-dhcp-agent restart; service neutron-plugin-openvswitch-agent restart
[root@ctrl02]# service neutron-server restart; service neutron-dhcp-agent restart; service neutron-plugin-openvswitch-agent restart
[root@ctrl03]# service neutron-server restart; service neutron-dhcp-agent restart; service neutron-plugin-openvswitch-agent restart
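
Verify the agents registered against the server:
[root@ctrl01]# neutron agent-list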

################################################################################
#####################  Nova Installation  #############################################
################################################################################
[root@ctrl01]# apt-get install -y nova-api nova-conductor nova-scheduler nova-novncproxy nova-consoleauth
[root@ctrl02]# apt-get install -y nova-api nova-conductor nova-scheduler nova-novncproxy nova-consoleauth
[root@ctrl03]# apt-get install -y nova-api nova-conductor nova-scheduler nova-novncproxy nova-consoleauth

[root@ctrl01]# mysql
CREATE DATABASE nova;
GRANT ALL ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
quit;

## on ctrl01, ctrl02, ctrl03 ##
[root@ctrl01]# vim /etc/nova/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = nova
signing_dir = /tmp/keystone-signing-nova
auth_version = v2.0

[root@ctrl01]# vim /etc/nova/nova.conf
[DEFAULT]
sql_idle_timeout=30
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://192.168.0.40:9696
neutron_admin_auth_url=http://192.168.0.40:35357/v2.0
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=neutron
neutron_admin_password=neutron
firewall_driver=nova.virt.firewall.NoopFirewallDriver
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
image_service=nova.image.glance.GlanceImageService
rpc_backend=nova.rpc.impl_kombu
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672
glance_api_servers=192.168.0.40:9292
service_down_time=60
rabbit_port=5672
rabbit_virtual_host=/
sql_connection=mysql://nova:nova@192.168.0.40/nova
memcached_servers=192.168.0.41:11211,192.168.0.42:11211,192.168.0.43:11211
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
ec2_listen=192.168.0.41
enabled_apis=ec2,osapi_compute,metadata
osapi_compute_listen=192.168.0.41
volume_api_class=nova.volume.cinder.API
auth_strategy=keystone
rootwrap_config= /etc/nova/rootwrap.conf
novncproxy_port=6080
novncproxy_base_url=http://192.168.0.40:6080/vnc_auto.html
novncproxy_host=192.168.0.41

(On ctrl02 and ctrl03, set ec2_listen, osapi_compute_listen and novncproxy_host to the local address.)

[root@ctrl01]# nova-manage db sync

[root@ctrl01]# cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done
[root@ctrl02]# cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done
[root@ctrl03]# cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done

[root@ctrl01]#  nova-manage service list
[root@ctrl01]#  service nova-api status
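In the nova-manage service list output a :-) in the State column means the service is sending heartbeats, while XXX means it has stopped checking in.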

[root@ctrl01]# /etc/libvirt/qemu.conf
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet"
]

[root@ctrl01]#  /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

[root@ctrl01]# /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"

[root@ctrl01]# /etc/default/libvirt-bin
libvirtd_opts="-d -l"

[root@ctrl01]# service libvirt-bin restart
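
Since libvirtd now listens on TCP without authentication, you can verify the listener (a quick check using the standard virsh connection URI):
[root@ctrl01]# virsh -c qemu+tcp://ctrl01/system list --all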

#################################################################################
####################  Cinder Installation  ##############################################
#################################################################################

[root@ctrl01]# apt-get install -y cinder-api cinder-scheduler
[root@ctrl02]# apt-get install -y cinder-api cinder-scheduler
[root@ctrl03]# apt-get install -y cinder-api cinder-scheduler

[root@ctrl01]# mysql
CREATE DATABASE cinder;
GRANT ALL ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';
GRANT ALL ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';
quit;

[root@ctrl01]# /etc/cinder/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = 192.168.0.40
service_port = 5000
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = services
admin_user = cinder
admin_password = keystone
signing_dir = /var/lib/cinder/keystone-signing

[root@ctrl01]# /etc/cinder/cinder.conf
[DEFAULT]
sql_idle_timeout=30
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
sql_connection = mysql://cinder:cinder@192.168.0.40/cinder
osapi_volume_listen = 192.168.0.41
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes

[root@ctrl01]# cinder-manage db sync
[root@ctrl01]# service cinder-api restart; service cinder-scheduler restart
[root@ctrl02]# service cinder-api restart; service cinder-scheduler restart
[root@ctrl03]# service cinder-api restart; service cinder-scheduler restart

#############################################################################
################################ Heat Installation  #################################
##############################################################################

[root@ctrl01]# apt-get install -y heat-api heat-api-cfn heat-api-cloudwatch heat-engine
[root@ctrl02]# apt-get install -y heat-api heat-api-cfn heat-api-cloudwatch heat-engine
[root@ctrl03]# apt-get install -y heat-api heat-api-cfn heat-api-cloudwatch heat-engine

[root@ctrl01]# mysql
CREATE DATABASE heat;
GRANT ALL ON heat.* TO 'heat'@'%' IDENTIFIED BY 'heat';
GRANT ALL ON heat.* TO 'heat'@'localhost' IDENTIFIED BY 'heat';
quit;

## on all controller nodes
[root@ctrl01]# /etc/heat/api-paste.ini
[filter:authtoken]
paste.filter_factory = heat.common.auth_token:filter_factory
service_host = 192.168.0.40
service_port = 5000
service_protocol = http
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
auth_uri = http://192.168.0.40:5000/v2.0/
admin_token = keystone

[root@ctrl01]# /etc/heat/heat.conf
[DEFAULT]
heat_metadata_server_url=http://192.168.0.40:8000
heat_waitcondition_server_url=http://192.168.0.40:8000/v1/waitcondition
heat_watch_server_url=http://192.168.0.40:8003
sql_idle_timeout=30
sql_connection = mysql://heat:heat@192.168.0.40/heat
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
log_dir=/var/log/heat
[ec2authtoken]
auth_uri = http://192.168.0.40:5000/v2.0/ec2tokens
[heat_api]
bind_host=192.168.0.41
[heat_api_cfn]
bind_host=192.168.0.41
[heat_api_cloudwatch]
bind_host=192.168.0.41

[root@ctrl01]# heat-manage db_sync
[root@ctrl01]# service heat-api restart; service heat-api-cfn restart; service heat-api-cloudwatch restart; service heat-engine restart
[root@ctrl02]# service heat-api restart; service heat-api-cfn restart; service heat-api-cloudwatch restart; service heat-engine restart
[root@ctrl03]# service heat-api restart; service heat-api-cfn restart; service heat-api-cloudwatch restart; service heat-engine restart

###################################################################
####################  Horizon Installation  ###############################
###################################################################
[root@ctrl01]# apt-get install -y memcached libapache2-mod-wsgi openstack-dashboard
[root@ctrl02]# apt-get install -y memcached libapache2-mod-wsgi openstack-dashboard
[root@ctrl03]# apt-get install -y memcached libapache2-mod-wsgi openstack-dashboard

[root@ctrl01]# /etc/openstack-dashboard/local_settings.py
replace 127.0.0.1 with 192.168.0.40

[root@ctrl02]# /etc/openstack-dashboard/local_settings.py
replace 127.0.0.1 with 192.168.0.40

[root@ctrl03]# /etc/openstack-dashboard/local_settings.py
replace 127.0.0.1 with 192.168.0.40
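
The same substitution can be scripted; a one-liner sketch, assuming the stock local_settings.py layout:
# sed -i 's/127.0.0.1/192.168.0.40/g' /etc/openstack-dashboard/local_settings.py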

[root@ctrl01]# /etc/memcached.conf
-l 192.168.0.41

[root@ctrl02]# /etc/memcached.conf
-l 192.168.0.42

[root@ctrl03]# /etc/memcached.conf
-l 192.168.0.43
(each controller binds memcached to its own management IP, matching the memcached_servers list in nova.conf)

[root@ctrl01]# service apache2 restart; service memcached restart
[root@ctrl02]# service apache2 restart; service memcached restart
[root@ctrl03]# service apache2 restart; service memcached restart
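
To confirm memcached is answering on its bind address (a quick check; nc/netcat assumed installed):
[root@ctrl01]# echo stats | nc 192.168.0.41 11211 | head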

##############################################################################
####################  Compute Node Installation  ######################################
##############################################################################

######################### Neutron Installation ####################################
[root@wrk01]# apt-get -y install neutron-plugin-openvswitch neutron-plugin-openvswitch-agent
[root@wrk01]# service openvswitch-switch status
[root@wrk01]# service openvswitch-switch start
[root@wrk01]# ovs-vsctl add-br br-int
[root@wrk01]# ovs-vsctl add-br br-ex
[root@wrk01]# ovs-vsctl add-port br-ex eth1
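[root@wrk01]# ovs-vsctl show
(a quick check that br-int and br-ex exist and that eth1 is attached to br-ex)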
[root@wrk01]# /etc/neutron/neutron.conf
[DEFAULT]
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672

[keystone_authtoken]
auth_host = 192.168.0.40
auth_port = 35357
auth_protocol = http
admin_tenant_name = services
admin_user = neutron
admin_password = keystone
signing_dir = /var/lib/neutron/keystone-signing

[root@wrk01]# /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
[ovs]
tenant_network_type = vlan
network_vlan_ranges = physnet1:223:225
bridge_mappings = physnet1:br-ex

# Using Neutron Security Groups instead of Nova Security Groups
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

[root@wrk01]# service neutron-plugin-openvswitch-agent restart

##############################   Nova Installation  #################################

[root@wrk01]#  apt-get install -y nova-compute

[root@wrk01]# modprobe kvm_intel
[root@wrk01]# service nova-compute restart
[root@wrk01]#  /etc/nova/nova.conf
[DEFAULT]
force_config_drive=true
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://192.168.0.40:9696
neutron_admin_auth_url=http://192.168.0.40:35357/v2.0
neutron_auth_strategy=keystone
neutron_admin_tenant_name=services
neutron_admin_username=neutron
neutron_admin_password=keystone
firewall_driver=nova.virt.firewall.NoopFirewallDriver
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672
glance_api_servers=192.168.0.40:9292
sql_connection=mysql://nova:nova@192.168.0.40/nova
memcached_servers=192.168.0.40:11211
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
volume_api_class=nova.volume.cinder.API
auth_strategy=keystone
rootwrap_config= /etc/nova/rootwrap.conf
live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
vnc_enabled=true
vncserver_listen=192.168.0.51
vncserver_proxyclient_address=192.168.0.51
novncproxy_base_url=http://192.168.0.40:6080/vnc_auto.html

[root@wrk01]# /etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
compute_driver=libvirt.LibvirtDriver

[root@wrk01]# service nova-compute restart

[root@wrk01]#  nova-manage service list

[root@wrk01]# /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

[root@wrk01]# /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"

[root@wrk01]# /etc/default/libvirt-bin
libvirtd_opts="-d -l"

[root@wrk01]#  service libvirt-bin restart

#######################  Cinder Installation  #########################################

[root@wrk01]#  apt-get install -y cinder-volume
[root@wrk01]# /etc/cinder/cinder.conf
[DEFAULT]
iscsi_ip_address=192.168.0.51
rabbit_ha_queues=True
rabbit_hosts=ctrl01:5672,ctrl02:5672,ctrl03:5672
rabbit_userid=rabbituser
rabbit_password=rabbitpassword
sql_connection = mysql://cinder:cinder@192.168.0.40/cinder
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes

[root@wrk01]# service cinder-volume restart; service tgt restart
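
A quick check that the volume service has its LVM backing store (the volume_group named in cinder.conf):
[root@wrk01]# vgs cinder-volumes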

NIC Teaming/Bonding For BigData

If your cluster is hitting network performance limits and you have not yet teamed your network interface cards, this is a good time to do it.

This post takes as an example servers with four 1Gbps NICs: two connected to one switch and the other two to a second switch, with both switches uplinked to the core switch over 10Gbps links.

Modify your network interface config files

#vi  /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
USERCTL=no
ONBOOT=yes
MASTER=bond0
SLAVE=yes
TYPE=Ethernet
BOOTPROTO=none

#vi /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
USERCTL=no
ONBOOT=yes
MASTER=bond0
SLAVE=yes
TYPE=Ethernet
BOOTPROTO=none

#vi /etc/sysconfig/network-scripts/ifcfg-eth2
DEVICE=eth2
USERCTL=no
ONBOOT=yes
MASTER=bond0
SLAVE=yes
TYPE=Ethernet
BOOTPROTO=none

#vi /etc/sysconfig/network-scripts/ifcfg-eth3
DEVICE=eth3
USERCTL=no
ONBOOT=yes
MASTER=bond0
SLAVE=yes
TYPE=Ethernet
BOOTPROTO=none

Create a New file for Bonding
# vi /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
IPADDR=YOUR-IP
NETMASK=YOUR-NET-MASK
NETWORK=YOUR-NETWORK
GATEWAY=YOUR-GATEWAY
ONBOOT=yes
BOOTPROTO=none
USERCTL=no

Modify your network file

# vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=HostName.DomainName.com

Configure the bonding kernel module
#vi /etc/modprobe.d/bonding.conf
alias bond0 bonding
options bond0 miimon=80 mode=0 lacp_rate=1
(note: lacp_rate only takes effect with mode=4/802.3ad; with mode=0, balance-rr, it is ignored)

#modprobe bonding

#reboot
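
After the reboot, verify that the bond came up and that all four slaves are active:
# cat /proc/net/bonding/bond0
The MII Status lines should read "up" for bond0 and for each slave interface.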

How to Tune the Operating System for Best BigData Performance

Hi All,

Paste the commands listed below into a file, give it 755 permissions, and execute it from a shell. Note that sysctl -w only changes the running kernel; the note near the end of the script shows how to make the settings persist across reboots.

###  Disable ipv6  ###
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
sysctl -w net.ipv6.conf.lo.disable_ipv6=1

###  Increase socket connection waiting queue  ###
sysctl -w net.core.somaxconn=1024

###  Increase kernel packet buffer  ###
sysctl -w net.core.netdev_max_backlog=4000

### Turn off selective acks not needed for low latency networks  ###
sysctl -w  net.ipv4.tcp_sack=0
###  Make keepalives useful  ###
sysctl -w net.ipv4.tcp_keepalive_time=600
sysctl -w net.ipv4.tcp_keepalive_probes=5
sysctl -w net.ipv4.tcp_keepalive_intvl=15

###  wait for socket close ack ###
sysctl -w  net.ipv4.tcp_fin_timeout=30

###  resend a packet  ###
sysctl -w  net.ipv4.tcp_retries2=10

###  resend socket close acks  ###
sysctl -w  net.ipv4.tcp_synack_retries=3
sysctl -w net.ipv4.route.flush=1

###  Reduce swapping  ###
sysctl -w vm.swappiness=5
###  Increase Buffer  ###
sysctl -w  net.ipv4.tcp_rmem='32768 436600 4194304'
sysctl -w  net.ipv4.tcp_wmem='32768 436600 4194304'

###  make subsequent connections use these values immediately  ###
sysctl -p /etc/sysctl.conf
sysctl -w net.ipv4.route.flush=1
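
### Note: the sysctl -w calls above only affect the running kernel. To persist
### them across reboots, append the same keys to /etc/sysctl.conf, e.g. (a sketch):
echo "net.core.somaxconn=1024" >> /etc/sysctl.conf
echo "vm.swappiness=5" >> /etc/sysctl.conf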

###  Drop Firewall Rules  ###    
chkconfig --del iptables ; chkconfig --del ip6tables

###  Disable ipv6  ###
echo "NETWORKING_IPV6=no" >> /etc/sysconfig/network
echo "install ipv6 /bin/true" >> /etc/modprobe.d/disable-ipv6.conf

###  Set NTP  ###
echo "#! /bin/sh" >> /etc/cron.hourly/ntpdate-sync
echo "/usr/sbin/ntpdate YOUR-NTP-SERVER-IP" >> /etc/cron.hourly/ntpdate-sync
chmod 755 /etc/cron.hourly/ntpdate-sync
chown root:root /etc/cron.hourly/ntpdate-sync
ntpdate -u YOUR-NTP-SERVER-IP

Making Cloud Images

##########################################################
Installing and checking KVM
##########################################################

You can create the image locally on your desktop or on a server; the only thing you need is a fully functional KVM setup.
Install KVM on your system
# sudo apt-get install qemu-kvm libvirt-bin bridge-utils kvm
or, on RHEL/CentOS (note the package names differ)
# yum install qemu-kvm libvirt bridge-utils

Check whether your hardware supports KVM virtualisation
# kvm-ok

INFO: /dev/kvm exists
KVM acceleration can be used  <==means you can use kvm
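
If kvm-ok is not available (it ships in Ubuntu's cpu-checker package), a generic alternative is to count the CPU virtualization flags; a non-zero result means the hardware supports it:
# egrep -c '(vmx|svm)' /proc/cpuinfo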

######################################################
Start Creating Images for your openstack Cloud
######################################################
Create a qcow2 image which emulates the Hard Drive
# kvm-img create -f qcow2 centos63.img 5G

Start the Virtual Machine booting from the CD
# kvm -m 1024 -cdrom CentOS-6.3-x86_64.iso -drive file=centos63.img,if=virtio,index=0 -boot d -net nic -net user -nographic -vnc :1

Connect to the Virtual Machine through VNC
# vncviewer yourip:1

(select openssh-client and curl during installation)

Start the Virtual Machine to boot from the Hard Drive
# kvm -m 512 -drive file=centos63.img,if=virtio,index=0,boot=on -boot c -net nic -net user -nographic -vnc :1

ReConnect to the Virtual Machine through VNC
# vncviewer yourip:1

For SSH key injection add the following lines before 'exit 0' in the file /etc/rc.local
    mkdir -p /root/.ssh
    echo >> /root/.ssh/authorized_keys
    curl -m 10 -s http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key | grep 'ssh-rsa' >> /root/.ssh/authorized_keys
    echo "AUTHORIZED_KEYS:"
    echo "************************"
    cat /root/.ssh/authorized_keys
    echo "************************"
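The curl call above pulls the instance's public key from the EC2-style metadata service that nova exposes at 169.254.169.254, so the key pair selected at boot time is injected on first start-up.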

# vi /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE="eth0"
    BOOTPROTO=dhcp
    NM_CONTROLLED="yes"
    ONBOOT="yes"

# cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth1

# vi /etc/sysconfig/network-scripts/ifcfg-eth1
    DEVICE="eth1"
    BOOTPROTO=dhcp
    NM_CONTROLLED="yes"
    ONBOOT="yes"

Delete the Rules created for network
# rm -rf /etc/udev/rules.d/70-persistent-net.rules

# shutdown -h now

#################################################
Uploading images in Glance Database
#################################################

Adding the Image to Glance
# glance add name=”CentOS6_3″ is_public=true container_format=ovf disk_format=qcow2 < /tmp/images/centos63.img

(Added new image with ID: 991baaf9-cc0d-4183-a201-8facdf1a1430)
(If you receive an ID, the image was uploaded successfully)

# glance index
ID                                   Name                           Disk Format          Container Format     Size          
------------------------------------ ------------------------------ -------------------- -------------------- --------------
eeabb5b8-7c11-4174-9568-bcd9e7181097 RedHat60-Base                  qcow2                ovf                       809762816
a463e74a-4851-42dd-8cd0-c76b7c0f5827 Redhat60                       qcow2                ovf                      1070596096
92d4533c-a2fe-4c87-9e77-82b99e6bae18 CentOS63_x64                   qcow2                ovf                       984219648

OpenStack Folsom Installation with Quantum and OVS

###################################################################################################

Configure your own OpenStack Cloud Folsom With Quantum Network
###################################################################################################

3 Servers
Server 1 = > running network controller and nova-compute
Server 2 = > running nova-compute, Cinder, rabbit-mq, mysql, keystone, glance, swift
Server 3 = > running nova-compute

All servers have four NICs; eth0 is connected to the public switch and the rest are connected to the private switch.
eth3 of Server 1 is also connected to the public switch for br-ex. On every node eth2 is for br-int.

10.10.x.x is for public access

172.24.0.x is for private network

172.24.x.x is reserved for tenant Network

#######################################################################################
#################################  Network Node Setup #################################
#######################################################################################

######################## CONFIGURE NETWORK ####################################

#vi /etc/network/interfaces
#### Management Network
auto eth0
iface eth0 inet static
address 10.10.1.2
netmask 255.255.255.0
gateway 10.10.1.254
dns-nameservers 10.10.2.2

###### VMs Networks with OVS in tunnel mode
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down

############ Public Bridge
auto eth3
iface eth3 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down

#service networking restart

#sed -i -r 's/^\s*#(net\.ipv4\.ip_forward=1.*)/\1/' /etc/sysctl.conf
#echo 1 > /proc/sys/net/ipv4/ip_forward

#apt-get install -y ntp
#vi /etc/ntp.conf
server ntp.ubuntu.com iburst
server 127.127.1.0
fudge 127.127.1.0 stratum 10
#service ntp restart

#vi /etc/apt/sources.list.d/folsom.list
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/folsom main
#apt-get install -y python-software-properties
#apt-get install ubuntu-cloud-keyring
#apt-get update
#apt-get upgrade
#apt-get dist-upgrade

#apt-get install -y kvm libvirt-bin pm-utils
#vi /etc/libvirt/qemu.conf
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet", "/dev/net/tun",
]

#virsh net-destroy default
#virsh net-undefine default
#vi /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

#vi /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"

# vi /etc/default/libvirt-bin
libvirtd_opts="-d -l"

# service libvirt-bin restart

######################### OPENVSWITCH INSTALLATION ###########################

#apt-get install openvswitch-switch
#apt-get install aptitude apt-show-versions ntp ntpdate vim kvm libvirt-bin vlan virtinst virt-manager virt-viewer openssh-server iperf pv openvswitch-controller openvswitch-brcompat openvswitch-switch openvswitch-datapath-source
#virsh net-destroy default
#virsh net-autostart --disable default
#service qemu-kvm stop
#vi /etc/default/openvswitch-switch
BRCOMPAT=yes

#aptitude purge ebtables
#apt-get install -y openvswitch-datapath-source
#apt-get install -y quantum-server quantum-plugin-openvswitch
#module-assistant auto-install openvswitch-datapath
#service openvswitch-switch start
#lsmod |grep br
#rmmod bridge
#service openvswitch-switch restart
#service openvswitch-controller restart
#lsmod |grep br
#init 6

#apt-get -y install  python-cliff python-pyparsing python-cliff  quantum-l3-agent quantum-dhcp-agent quantum-plugin-openvswitch-agent

#vi /etc/quantum/l3_agent.ini
[DEFAULT]
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
auth_url = http://10.10.1.1:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = password
root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf
use_namespaces = False
router_id = d3f4cb2d-e64c-4292-b594-156eff390279
external_network_bridge = br-ex
metadata_ip = 10.10.1.1
metadata_port = 8775
polling_interval = 3
rabbit_host = 10.10.1.1
rabbit_userid = guest
rabbit_password = password

#vi /etc/quantum/dhcp_agent.ini
[DEFAULT]
state_path = /var/lib/quantum
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
use_namespaces = False
root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf
rabbit_host = 10.10.1.1
rabbit_userid = guest
rabbit_password = password

#vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:password@10.10.1.1:3306/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 172.24.0.2
enable_tunneling = True
[AGENT]
polling_interval = 2
root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf

#vi /etc/quantum/api-paste.ini
[composite:quantum]
use = egg:Paste#urlmap
/: quantumversions
/v2.0: quantumapi_v2_0

[composite:quantumapi_v2_0]
use = call:quantum.auth:pipeline_factory
noauth = extensions quantumapiapp_v2_0
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0

[filter:keystonecontext]
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = 10.10.1.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = password

[filter:extensions]
paste.filter_factory = quantum.extensions.extensions:plugin_aware_extension_middleware_factory

[app:quantumversions]
paste.app_factory = quantum.api.versions:Versions.factory

[app:quantumapiapp_v2_0]
paste.app_factory = quantum.api.v2.router:APIRouter.factory

#cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done

###################################################################################
#################################  Server 2 Setup #################################
###################################################################################

################################# Configure Network Interfaces ####################
#vi /etc/network/interfaces
## Management Network
auto eth0
iface eth0 inet static
address 10.10.1.1
netmask 255.255.255.0
gateway 10.10.1.254
dns-nameservers 10.10.2.2

## VMs Networks with OVS in tunnel mode
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down

############################ Add the Folsom Repo ###################################

#vi /etc/apt/sources.list.d/folsom.list
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/folsom main
#apt-get install -y python-software-properties
#apt-get install ubuntu-cloud-keyring
#apt-get update
#apt-get upgrade
#apt-get dist-upgrade

##################### Install libvirt and configure for auto failover ##############
#apt-get install -y kvm libvirt-bin pm-utils

#vi /etc/libvirt/qemu.conf
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet", "/dev/net/tun",
]

#virsh net-destroy default
#virsh net-undefine default

#vi /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

#vi /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"

# vi /etc/default/libvirt-bin
libvirtd_opts="-d -l"

# service libvirt-bin restart

############################## Install OpenVSwitch ####################################

#apt-get install openvswitch-switch
#apt-get install aptitude apt-show-versions ntp ntpdate vim kvm libvirt-bin vlan virtinst virt-manager virt-viewer openssh-server iperf pv openvswitch-controller openvswitch-brcompat openvswitch-switch openvswitch-datapath-source
#virsh net-destroy default
#virsh net-autostart --disable default
#service qemu-kvm stop
#vi /etc/default/openvswitch-switch
BRCOMPAT=yes

#aptitude purge ebtables

#apt-get install -y openvswitch-datapath-source
#apt-get install -y quantum-server quantum-plugin-openvswitch
#module-assistant auto-install openvswitch-datapath
#service openvswitch-switch start
#lsmod |grep br
#rmmod bridge
#service openvswitch-switch restart
#service openvswitch-controller restart
#lsmod |grep br
#init 6

########################## Install and Configure MySQL DataBase  #######################
#apt-get -y install mysql-server python-mysqldb
#sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf

########  CREATING AND ADDING THE DATABASES FOR ALL THE SERVICES ##########

#service mysql restart
#mysql -u root -ppassword
>CREATE DATABASE nova;
>GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'password';
>CREATE DATABASE cinder;
>GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'password';
>CREATE DATABASE glance;
>GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'password';
>CREATE DATABASE keystone;
>GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'password';
>CREATE DATABASE quantum;
>GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY 'password';
>FLUSH PRIVILEGES;
>quit

###################  Installing RabbitMQ Server ##########################################
#apt-get -y install rabbitmq-server
#rabbitmqctl change_password guest password

########### Installing and creating Keystone ###############################################

#apt-get -y install keystone python-keystone python-keystoneclient
#vi /etc/keystone/keystone.conf
[DEFAULT]
admin_token = password
bind_host = 0.0.0.0
public_port = 5000
admin_port = 35357
compute_port = 8774
verbose = False
debug = False
log_file = keystone.log
log_dir = /var/log/keystone
use_syslog = False
log_config = /etc/keystone/logging.conf
[sql]
connection = mysql://keystone:password@10.10.1.1:3306/keystone
idle_timeout = 200
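
Before creating tenants and users, initialise the Keystone database and restart the service (these steps are implied by the commands that follow):
#keystone-manage db_sync
#service keystone restart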

##############################  USER Tenant Management  ##############################

#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 tenant-create --name admin --description "admin tenant"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-create --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --name admin --pass password --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 role-create --name admin

#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-role-add --user-id b2b1a863065a42528cfb91e15406faec --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --role-id 316491486529494b93fdcedc23163fe5
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 tenant-create --name service --description "Service Tenant" --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-create --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --name glance --pass password --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-role-add --user-id cc8062a4d6134864b0721b5da84723c1 --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --role-id 316491486529494b93fdcedc23163fe5
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-create --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --name nova --pass password --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-role-add --user-id 3e7865e8357242b4a43c430f538e915b --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --role-id 316491486529494b93fdcedc23163fe5
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-create --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --name ec2 --pass password --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-role-add --user-id 491bd45a22ee4067b384be0175e0518e --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --role-id 316491486529494b93fdcedc23163fe5
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-create --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --name swift --pass password --enabled true
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0 user-role-add --user-id e69f241820a54c21a5dc3ec68e7cc3d5 --tenant_id 15f57d196ef74b5a90b26f80f8d6aa96 --role-id 316491486529494b93fdcedc23163fe5

#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=keystone --type=identity --description="Keystone Identity Service"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=c3fbf11d6e8743c5b011119e9446c181 --publicurl=http://10.10.1.1:5000/v2.0 --internalurl=http://172.24.0.1:5000/v2.0 --adminurl=http://10.10.1.1:35357/v2.0
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=nova --type=compute --description="Nova Compute Service"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=14604eb988524774ab13c8d7518e2ad3 --publicurl='http://10.10.1.1:8774/v2/%(tenant_id)s' --internalurl='http://172.24.0.1:8774/v2/%(tenant_id)s' --adminurl='http://10.10.1.1:8774/v2/%(tenant_id)s'
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=volume --type=volume --description="Nova Volume Service"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=a02d0ccf05c646f288b447b7faa803cd --publicurl='http://10.10.1.1:8776/v1/%(tenant_id)s' --internalurl='http://172.24.0.1:8776/v1/%(tenant_id)s' --adminurl='http://10.10.1.1:8776/v1/%(tenant_id)s'
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=glance --type=image --description="Glance Image Service"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=015934dce28f41808aba8955e45a5d75 --publicurl=http://10.10.1.1:9292/v1 --internalurl=http://172.24.0.1:9292/v1 --adminurl=http://10.10.1.1:9292/v1
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=ec2 --type=ec2 --description="EC2 Compatibility Layer"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=73c24186b946489c9bc68fbb5d1292aa --publicurl=http://10.10.1.1:8773/services/Cloud --internalurl=http://172.24.0.1:8773/services/Cloud --adminurl=http://10.10.1.1:8773/services/Admin
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ service-create --name=swift --type=object-store --description="Object Storage Service"
#keystone --token admin --endpoint http://10.10.1.1:35357/v2.0/ endpoint-create --region RegionOne --service_id=1356fb657c5d41edb1ca89e845f19b90 --publicurl 'http://10.10.1.1:8888/v1/AUTH_%(tenant_id)s' --adminurl 'http://10.10.1.1:8888/v1' --internalurl 'http://172.24.0.1:8888/v1/AUTH_%(tenant_id)s'

#keystone user-create --name cinder --pass cinder --email vivekraghuwanshi@gmail.com
#keystone user-create --name quantum --pass quantum --email vivekraghuwanshi@gmail.com

#keystone service-create --name cinder --type volume --description "Cinder Service"
#keystone endpoint-create --region RegionOne --service_id 7d88142f2fa24ba0a0414492ace95225 --publicurl "http://10.10.1.1:8776/v1/%(tenant_id)s" --adminurl "http://10.10.1.1:8776/v1/%(tenant_id)s" --internalurl "http://172.24.0.1:8776/v1/%(tenant_id)s"
#keystone user-role-add --user_id 62ba2e5471fc437cb4637cab9c38fefb --role_id 316491486529494b93fdcedc23163fe5 --tenant_id 10d998664e3a4232baaf8c0555ba6438

#keystone service-create --name quantum --type network --description "OpenStack Networking service"
#keystone endpoint-create --region RegionOne --service-id 2291c2c398534b378d9317f4a0e3c884 --publicurl "http://10.10.1.1:9696/" --adminurl "http://10.10.1.1:9696/" --internalurl "http://172.24.0.1:9696/"
#keystone user-role-add --user_id 0ad8bcc66bd44e9296af508ed0d9b8e4 --role_id 316491486529494b93fdcedc23163fe5 --tenant_id 10d998664e3a4232baaf8c0555ba6438

##################################### Testing the credentials ##############################
#apt-get install curl openssl
#curl -d '{"auth": {"tenantName": "admin", "passwordCredentials":{"username": "admin", "password": "password"}}}' -H "Content-type: application/json" http://10.10.1.1:35357/v2.0/tokens | python -mjson.tool
#curl -d '{"auth": {"tenantName": "admin", "passwordCredentials":{"username": "admin", "password": "password"}}}' -H "Content-type: application/json" http://172.24.0.1:35357/v2.0/tokens | python -mjson.tool

########################################  SWIFT-STORAGE Configuration ###################
#apt-get install swift swift-proxy swift-account swift-container swift-object xfsprogs curl python-pastedeploy
#apt-get install swift openssh-server  rsync memcached python-netifaces python-xattr python-memcache
#mkdir -p /etc/swift
#chown -R swift:swift /etc/swift/
#vi /etc/swift/swift.conf
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = fLIbertYgibbitZ
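
Any random string will do for the suffix, as long as it never changes afterwards; one way to generate one (a sketch):
# od -t x8 -N 8 -A n < /dev/urandom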

#swift-init proxy start
#vi /etc/memcached.conf
-l 172.24.0.1

#service memcached restart
#vi /etc/swift/proxy-server.conf

Export the admin credentials for testing with the swift client (shell environment, not file content):
#export ADMIN_TOKEN=password
#export OS_USERNAME=swift
#export OS_PASSWORD=password
#export OS_TENANT_NAME=admin
#export OS_AUTH_URL=http://10.10.1.1:5000/v2.0/
#swift-init proxy start
#swift-init main start
#swift-init rest start

#swift-init proxy restart

#fdisk /dev/sdb
create a new partition "/dev/sdb1"
#partprobe
#mkfs.xfs -i size=1024 -f /dev/sdb1
#mkdir -p /srv/node/sdb1
#echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
#mount /srv/node/sdb1
#chown -R swift:swift /srv/node
#vi /etc/rsyncd.conf
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 172.24.0.1
[account]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/object.lock

#scp /etc/rsyncd.conf 172.24.0.2:/etc/
#scp /etc/rsyncd.conf 172.24.0.3:/etc/
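
On Ubuntu the rsync daemon is disabled by default; enable it before starting the service (assuming the stock /etc/default/rsync):
#sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/' /etc/default/rsync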

#service rsync start

#vi /etc/swift/account-server.conf
[DEFAULT]
bind_ip = 172.24.0.1
workers = 3

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:swift#account

[account-replicator]

[account-auditor]

[account-reaper]

#vi /etc/swift/container-server.conf
[DEFAULT]
bind_ip = 172.24.0.1
workers = 3

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:swift#container

[container-replicator]

[container-updater]

[container-auditor]

#vi /etc/swift/object-server.conf
[DEFAULT]
bind_ip = 172.24.0.1
workers = 3

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:swift#object

[object-replicator]

[object-updater]

[object-auditor]

#swift-init object-server start
#swift-init object-replicator start
#swift-init object-updater start
#swift-init object-auditor start
#swift-init container-server start
#swift-init container-replicator start
#swift-init container-updater start
#swift-init container-auditor start
#swift-init account-server start
#swift-init account-replicator start
#swift-init account-auditor start

#cd /etc/swift
#openssl req -new -x509 -nodes -out cert.crt -keyout cert.key

#service memcached restart
#service memcached status
#vi /etc/memcached.conf
-l 172.24.0.1

#swift-ring-builder account.builder create 18 3 1
#swift-ring-builder container.builder create 18 3 1
#swift-ring-builder object.builder create 18 3 1
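For reference, the three arguments to "create" are the partition power (2^18 partitions), the replica count (3), and the minimum number of hours before a partition can be moved again (1).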
#swift-ring-builder account.builder add z1-172.24.0.1:6002/sdb1 100
#swift-ring-builder container.builder add z1-172.24.0.1:6001/sdb1 100
#swift-ring-builder object.builder add z1-172.24.0.1:6000/sdb1 100
#swift-ring-builder account.builder add z2-172.24.0.2:6002/sdb1 100
#swift-ring-builder container.builder add z2-172.24.0.2:6001/sdb1 100
#swift-ring-builder object.builder add z2-172.24.0.2:6000/sdb1 100
#swift-ring-builder account.builder add z3-172.24.0.3:6002/sdb1 100
#swift-ring-builder container.builder add z3-172.24.0.3:6001/sdb1 100
#swift-ring-builder object.builder add z3-172.24.0.3:6000/sdb1 100
#swift-ring-builder account.builder
#swift-ring-builder container.builder
#swift-ring-builder object.builder
#swift-ring-builder account.builder rebalance
#swift-ring-builder container.builder rebalance
#swift-ring-builder object.builder rebalance
#scp -r *.gz 172.24.0.2:/etc/swift/
#scp -r *.gz 172.24.0.3:/etc/swift/
#chown -R swift:swift /etc/swift
#swift-init proxy start

#swift-init proxy status
#swift-init main start
#swift-init main restart
#swift-init rest restart

#fdisk /dev/sdb        (create a new partition /dev/sdb5 for Cinder)
#partprobe
#fdisk -l
#pvcreate /dev/sdb5
#pvdisplay /dev/sdb5

#vgcreate cinder-volumes /dev/sdb5

##############################   GLANCE IMAGE-SERVICE   #############################
#apt-get -y install glance glance-api python-glanceclient glance-common
#vi /etc/glance/glance-api.conf
sql_connection = mysql://glance:password@10.10.1.1/glance
admin_tenant_name = service
admin_user = glance
admin_password = password
swift_store_auth_version = 2
swift_store_auth_address = 127.0.0.1:5000/v2.0/
swift_store_user = admin:admin
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
swift_store_container = glance
swift_store_create_container_on_put = True
swift_store_large_object_size = 5120
swift_store_large_object_chunk_size = 200

[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = password

#vi /etc/glance/glance-registry.conf
sql_connection = mysql://glance:password@10.10.1.1/glance
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = password

#vi /etc/glance/glance-api.conf
notifier_strategy = rabbit
rabbit_password = password

#glance-manage db_sync
#service glance-api restart && service glance-registry restart
#glance image-list

###############################  Installing Nova and components   #####################
#apt-get -y install nova-api nova-cert nova-common nova-scheduler python-nova python-novaclient nova-consoleauth novnc nova-compute-kvm nova-novncproxy

#vi /etc/nova/api-paste.ini
auth_host = 10.10.1.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
signing_dirname = /tmp/keystone-signing-nova

#vi /etc/nova/nova.conf
[DEFAULT]
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
verbose=False
api_paste_config=/etc/nova/api-paste.ini
scheduler_driver=nova.scheduler.simple.SimpleScheduler
s3_host=10.10.1.1
ec2_host=10.10.1.1
ec2_dmz_host=10.10.1.1
rabbit_host=10.10.1.1
cc_host=10.10.1.1
nova_url=http://10.10.1.1:8774/v1.1/
sql_connection=mysql://nova:password@10.10.1.1/nova
ec2_url=http://10.10.1.1:8773/services/Cloud
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf

rabbit_password=password
##Auth
use_deprecated_auth=false
auth_strategy=keystone
keystone_ec2_url=http://10.10.1.1:5000/v2.0/ec2tokens
## Imaging service
glance_api_servers=10.10.1.1:9292
image_service=nova.image.glance.GlanceImageService

## Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://10.10.1.1:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=10.10.1.1
vncserver_listen=0.0.0.0

## Network settings
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://10.10.1.1:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://10.10.1.1:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

## Compute #
compute_driver=libvirt.LibvirtDriver

## Cinder #
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900

# vi /etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
libvirt_ovs_bridge=br-int
libvirt_vif_type=ethernet
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
libvirt_use_virtio_for_bridges=True

#nova-manage db sync

#cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done

#apt-get install -y openvswitch-switch quantum-plugin-openvswitch-agent
# /etc/init.d/openvswitch-switch restart
# ovs-vsctl add-br br-int
#ovs-vsctl add-port br-int eth1
#ip addr flush dev br-int
#ip addr add 172.24.0.1/255.255.0.0 dev br-int
#ip link set br-int up

##################################   Installing and Configuring Quantum Server ###############
# apt-get -y install quantum-server python-cliff python-pyparsing quantum-plugin-openvswitch-agent

# vi /etc/quantum/quantum.conf
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
auth_strategy = keystone
fake_rabbit = False
rabbit_password = password

#vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:password@10.10.1.1:3306/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 172.24.0.1
enable_tunneling = True
[AGENT]
polling_interval = 2
root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf

# cd /etc/init.d/; for i in $( ls quantum-* ); do sudo service $i restart; done

###########################   Installing Cinder  ##############################

#apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms python-cinderclient
#sed -i 's/false/true/g' /etc/default/iscsitarget
#service iscsitarget start
#service open-iscsi start

#vi /etc/cinder/cinder.conf
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
sql_connection = mysql://cinder:password@10.10.1.1:3306/cinder
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = ietadm
volume_name_template = volume-%s
volume_group = cinder-volumes
rabbit_password = password
logdir = /var/log/cinder
verbose = False
auth_strategy = keystone

#vi /etc/cinder/api-paste.ini
service_protocol = http
service_host = 10.10.1.1
service_port = 5000
auth_host = 10.10.1.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = password

Initialise the Cinder database and restart the services:

#cinder-manage db sync
#cd /etc/init.d/; for i in $( ls cinder-* ); do sudo service $i restart; done

#apt-get -y install apache2 libapache2-mod-wsgi openstack-dashboard memcached python-memcache

###################################################################################
#################################  Server 3 Setup #################################
###################################################################################

#vi /etc/network/interfaces
## Management Network
auto eth0
iface eth0 inet static
address 10.10.1.3
netmask 255.255.255.0
gateway 10.10.1.254
dns-nameservers 10.10.2.2

## VMs Networks with OVS in tunnel mode
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down

#vi /etc/apt/sources.list.d/folsom.list
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/folsom main
#apt-get install -y python-software-properties
#apt-get install ubuntu-cloud-keyring
#apt-get update
#apt-get upgrade
#apt-get dist-upgrade

#apt-get install -y kvm libvirt-bin pm-utils

#vi /etc/libvirt/qemu.conf
cgroup_device_acl = [
"/dev/null", "/dev/full", "/dev/zero",
"/dev/random", "/dev/urandom",
"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
"/dev/rtc", "/dev/hpet", "/dev/net/tun",
]

#virsh net-destroy default
#virsh net-undefine default

#vi /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"

#vi /etc/init/libvirt-bin.conf
env libvirtd_opts="-d -l"

# vi /etc/default/libvirt-bin
libvirtd_opts="-d -l"

# service libvirt-bin restart

OPENVSWITCH

#apt-get install openvswitch-switch
#apt-get install aptitude apt-show-versions ntp ntpdate vim kvm libvirt-bin vlan virtinst virt-manager virt-viewer openssh-server iperf pv openvswitch-controller openvswitch-brcompat openvswitch-switch openvswitch-datapath-source
#virsh net-destroy default
#virsh net-autostart --disable default
#service qemu-kvm stop
#vi /etc/default/openvswitch-switch
BRCOMPAT=yes

#aptitude purge ebtables

#apt-get install -y openvswitch-datapath-source
#apt-get install -y quantum-server quantum-plugin-openvswitch
#module-assistant auto-install openvswitch-datapath
#service openvswitch-switch start
#lsmod |grep br
#rmmod bridge
#service openvswitch-switch restart
#service openvswitch-controller restart
#lsmod |grep br
#init 6

#apt-get -y install nova-common python-nova python-novaclient novnc nova-compute-kvm nova-novncproxy

#vi /etc/nova/api-paste.ini
auth_host = 10.10.1.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
signing_dirname = /tmp/keystone-signing-nova

#vi /etc/nova/nova.conf
[DEFAULT]
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
verbose=False
api_paste_config=/etc/nova/api-paste.ini
scheduler_driver=nova.scheduler.simple.SimpleScheduler
s3_host=10.10.1.1
ec2_host=10.10.1.1
ec2_dmz_host=10.10.1.1
rabbit_host=10.10.1.1
cc_host=10.10.1.1
nova_url=http://10.10.1.1:8774/v1.1/
sql_connection=mysql://nova:password@10.10.1.1/nova
ec2_url=http://10.10.1.1:8773/services/Cloud
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf

rabbit_password=password
##Auth
use_deprecated_auth=false
auth_strategy=keystone
keystone_ec2_url=http://10.10.1.1:5000/v2.0/ec2tokens
## Imaging service
glance_api_servers=10.10.1.1:9292
image_service=nova.image.glance.GlanceImageService

## Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://10.10.1.1:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=10.10.1.1
vncserver_listen=0.0.0.0

## Network settings
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://10.10.1.1:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://10.10.1.1:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

## Compute #
compute_driver=libvirt.LibvirtDriver

## Cinder #
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900

# vi /etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
libvirt_ovs_bridge=br-int
libvirt_vif_type=ethernet
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
libvirt_use_virtio_for_bridges=True

#nova-manage db sync

#cd /etc/init.d/; for i in $( ls nova-* ); do sudo service $i restart; done

####################################################################################
#########################Quantum Network Configuration #############################
####################################################################################

######################### Creating Internal Network ################################
quantum net-create pvt1 --tenant-id 6415b015581c4506a46e97170395a598 --provider:network_type gre --provider:segmentation_id 1
quantum net-create pvt2 --tenant-id 6415b015581c4506a46e97170395a598 --provider:network_type gre --provider:segmentation_id 2
quantum subnet-create --tenant-id 6415b015581c4506a46e97170395a598 pvt1 172.24.1.0/24
quantum subnet-create --tenant-id 6415b015581c4506a46e97170395a598 pvt2 172.24.2.0/24
quantum router-create --tenant_id 6415b015581c4506a46e97170395a598 router1
quantum router-interface-add f76cf2cc-1a3f-4a33-9b90-e6828ec7c647 4b38dbb4-7bf2-415c-894f-a65cb2f0d291
quantum router-interface-add f76cf2cc-1a3f-4a33-9b90-e6828ec7c647 30fb451c-5497-4f1f-ac9e-27c0f9771162

######################### Creating External Network ################################
quantum net-create pub1 --tenant-id 6415b015581c4506a46e97170395a598 --router:external=True --shared True
quantum subnet-create --ip_version 4 --allocation-pool start=10.10.1.11,end=10.10.1.100 --gateway 10.10.1.254 da3937d5-116c-4d6c-ac53-26f139a3efb5 10.10.1.0/24 -- --enable_dhcp=False
quantum router-gateway-set f76cf2cc-1a3f-4a33-9b90-e6828ec7c647 a6e7e63b-0e26-4d0c-8952-e5b1c7d98dcf
ip addr flush dev br-ex
ip addr add 10.10.1.6/255.255.255.0 dev br-ex
ip link set br-ex up

############################# Assigning Floating IP to Instances #############################

# quantum net-list
+--------------------------------------+--------------+--------------------------------------+
| id                                   | name         | subnets                              |
+--------------------------------------+--------------+--------------------------------------+
| 83f45055-7dc1-4f56-b4d2-7e01441d930a | pvt1         | d3281526-248c-4e97-9e21-83dc3366d55f |
| 8e35221b-e7e1-4599-ba81-e1d983a1eb01 | pvt2         | eeadeb47-b582-4874-b38f-436b117ecd39 |
| a1fc652c-b02a-415c-92a5-ffd550edff86 | pub1         | bac5f268-672e-47e9-97b9-7ae5bf745835 |
+--------------------------------------+--------------+--------------------------------------+

# quantum subnet-list
+--------------------------------------+------+----------------+-------------------------------------------------+
| id                                   | name | cidr           | allocation_pools                                |
+--------------------------------------+------+----------------+-------------------------------------------------+
| bac5f268-672e-47e9-97b9-7ae5bf745835 |      | 10.10.1.0/24   | {"start": "10.10.1.11", "end": "10.10.1.100"}   |
| d3281526-248c-4e97-9e21-83dc3366d55f |      | 172.24.1.0/24  | {"start": "172.24.1.2", "end": "172.24.1.254"}  |
| eeadeb47-b582-4874-b38f-436b117ecd39 |      | 172.24.2.0/24  | {"start": "172.24.2.2", "end": "172.24.2.254"}  |
+--------------------------------------+------+----------------+-------------------------------------------------+

# quantum router-list
+--------------------------------------+---------+--------------------------------------------------------+
| id                                   | name    | external_gateway_info                                  |
+--------------------------------------+---------+--------------------------------------------------------+
| ee85ae67-e8b3-4fd9-8045-ea0668c5bd17 | router1 | {"network_id": "a1fc652c-b02a-415c-92a5-ffd550edff86"} |
+--------------------------------------+---------+--------------------------------------------------------+

# quantum port-list -- --device_id ee85ae67-e8b3-4fd9-8045-ea0668c5bd17
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                           |
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------+
| 5547983a-6d64-4a6d-8e49-2e0eafd5ac44 |      | fa:16:3e:a9:41:63 | {"subnet_id": "d3281526-248c-4e97-9e21-83dc3366d55f", "ip_address": "172.24.1.1"}   |
| c4c526a6-070c-45a0-ab10-6c06e47f66ae |      | fa:16:3e:95:23:47 | {"subnet_id": "bac5f268-672e-47e9-97b9-7ae5bf745835", "ip_address": "10.10.1.11"}   |
| f1b76b55-f24b-4a3f-9572-80a38247993b |      | fa:16:3e:87:1b:f3 | {"subnet_id": "eeadeb47-b582-4874-b38f-436b117ecd39", "ip_address": "172.24.2.1"}   |
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------+

# quantum floatingip-create pub1
Created a new floatingip:
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| fixed_ip_address    |                                      |
| floating_ip_address | 10.10.1.12                           |
| floating_network_id | a1fc652c-b02a-415c-92a5-ffd550edff86 |
| id                  | 14225a28-b8fc-4f67-b4a9-659046390ac3 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | 6415b015581c4506a46e97170395a598     |
+---------------------+--------------------------------------+

# nova list
+--------------------------------------+------------+--------+--------------------------------------------------+
| ID                                   | Name       | Status | Networks                                         |
+--------------------------------------+------------+--------+--------------------------------------------------+
| 5a56ce9d-adf7-4400-85ba-1fb6f718a63b | RepoServer | ACTIVE | pvt1=172.24.1.4; pvt2=172.24.2.4                 |
| fcebfbe7-6571-4c32-b133-960c984ce3a4 | redhat1    | ACTIVE | pvt1=172.24.1.5; pvt2=172.24.2.5                 |
| 458348b0-6e28-4c46-aed2-38b2f00d698b | redhat2    | ACTIVE | pvt1=172.24.1.6; pvt2=172.24.2.6                 |
+--------------------------------------+------------+--------+--------------------------------------------------+

# quantum port-list -- --device_id 5a56ce9d-adf7-4400-85ba-1fb6f718a63b
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                         |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+
| 7b691ee7-25aa-4ba3-af6c-955f31e67f44 |      | fa:16:3e:17:86:81 | {"subnet_id": "eeadeb47-b582-4874-b38f-436b117ecd39", "ip_address": "172.24.2.4"} |
| a62cc9b9-e281-403a-8abc-d4d04634ebdf |      | fa:16:3e:66:f6:28 | {"subnet_id": "d3281526-248c-4e97-9e21-83dc3366d55f", "ip_address": "172.24.1.4"} |
+--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------+

# quantum floatingip-associate 14225a28-b8fc-4f67-b4a9-659046390ac3 7b691ee7-25aa-4ba3-af6c-955f31e67f44
Associated floatingip 14225a28-b8fc-4f67-b4a9-659046390ac3

# quantum floatingip-show 14225a28-b8fc-4f67-b4a9-659046390ac3
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| fixed_ip_address    | 172.24.2.4                           |
| floating_ip_address | 10.10.1.12                           |
| floating_network_id | a1fc652c-b02a-415c-92a5-ffd550edff86 |
| id                  | 14225a28-b8fc-4f67-b4a9-659046390ac3 |
| port_id             | 7b691ee7-25aa-4ba3-af6c-955f31e67f44 |
| router_id           | ee85ae67-e8b3-4fd9-8045-ea0668c5bd17 |
| tenant_id           | 6415b015581c4506a46e97170395a598     |
+---------------------+--------------------------------------+
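
With the floating IP associated, the instance should now be reachable from the public network (assuming its security group permits ICMP):
# ping 10.10.1.12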
