2.2. Cài đặt Requirement packages
- Giả lập trên Vmware ESXi 6.0
- CentOS 7.4 Server 64bit 1804
- Thiết lập Hostname
hostnamectl set-hostname controller
- Thiết lập IP
echo "Setup IP ens160"
nmcli c modify ens160 ipv4.addresses 10.10.10.61/24
nmcli c modify ens160 ipv4.gateway 10.10.10.1
nmcli c modify ens160 ipv4.dns 8.8.8.8
nmcli c modify ens160 ipv4.method manual
nmcli con mod ens160 connection.autoconnect yes
echo "Setup IP ens192"
nmcli c modify ens192 ipv4.addresses 10.10.11.61/24
nmcli c modify ens192 ipv4.method manual
nmcli con mod ens192 connection.autoconnect yes
echo "Setup IP ens224"
nmcli c modify ens224 ipv4.addresses 10.10.12.61/24
nmcli c modify ens224 ipv4.method manual
nmcli con mod ens224 connection.autoconnect yes
echo "Disable Firewall & SElinux"
sudo systemctl disable firewalld
sudo systemctl stop firewalld
sudo systemctl disable NetworkManager
sudo systemctl stop NetworkManager
sudo systemctl enable network
sudo systemctl start network
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
- Khai báo repo và cài đặt các package cho OpenStack Queens
echo '[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1' >> /etc/yum.repos.d/MariaDB.repo
yum install -y epel-release
yum update -y
yum install -y centos-release-openstack-queens \
open-vm-tools python2-PyMySQL vim telnet wget curl
yum install -y python-openstackclient openstack-selinux
yum upgrade -y
- Setup timezone về Ho_Chi_Minh
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Ho_Chi_Minh /etc/localtime
- Bổ sung cmd_log
curl -Lso- https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/cmdlog.sh | bash
- Option bổ sung: Cài đặt collector sidecar để đẩy log lên Graylog
yum install wget -y
wget https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/graylog-collector-sidecar.sh
chmod +x graylog-collector-sidecar.sh
bash graylog-collector-sidecar.sh
Nhập IP management của Graylog Server và IP management của Server
- Reboot Server
init 6
- Thiết lập Hostname
hostnamectl set-hostname compute1
- Thiết lập IP
echo "Setup IP ens160"
nmcli c modify ens160 ipv4.addresses 10.10.10.62/24
nmcli c modify ens160 ipv4.gateway 10.10.10.1
nmcli c modify ens160 ipv4.dns 8.8.8.8
nmcli c modify ens160 ipv4.method manual
nmcli con mod ens160 connection.autoconnect yes
echo "Setup IP ens192"
nmcli c modify ens192 ipv4.addresses 10.10.11.62/24
nmcli c modify ens192 ipv4.method manual
nmcli con mod ens192 connection.autoconnect yes
echo "Setup IP ens224"
nmcli c modify ens224 ipv4.addresses 10.10.12.62/24
nmcli c modify ens224 ipv4.method manual
nmcli con mod ens224 connection.autoconnect yes
echo "Disable Firewall & SElinux"
sudo systemctl disable firewalld
sudo systemctl stop firewalld
sudo systemctl disable NetworkManager
sudo systemctl stop NetworkManager
sudo systemctl enable network
sudo systemctl start network
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
- Khai báo repo và cài đặt các package cho OpenStack Queens
echo '[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1' >> /etc/yum.repos.d/MariaDB.repo
yum install -y epel-release
yum update -y
yum install -y centos-release-openstack-queens \
open-vm-tools python2-PyMySQL vim telnet wget curl
yum install -y python-openstackclient openstack-selinux
yum upgrade -y
- Setup timezone về Ho_Chi_Minh
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Ho_Chi_Minh /etc/localtime
- Bổ sung cmd_log
curl -Lso- https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/cmdlog.sh | bash
- Option bổ sung: Cài đặt collector sidecar để đẩy log lên Graylog
yum install wget -y
wget https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/graylog-collector-sidecar.sh
chmod +x graylog-collector-sidecar.sh
bash graylog-collector-sidecar.sh
Nhập IP management của Graylog Server và IP management của Server
- Reboot Server
init 6
- Thiết lập Hostname
hostnamectl set-hostname compute2
- Thiết lập IP
echo "Setup IP ens160"
nmcli c modify ens160 ipv4.addresses 10.10.10.63/24
nmcli c modify ens160 ipv4.gateway 10.10.10.1
nmcli c modify ens160 ipv4.dns 8.8.8.8
nmcli c modify ens160 ipv4.method manual
nmcli con mod ens160 connection.autoconnect yes
echo "Setup IP ens192"
nmcli c modify ens192 ipv4.addresses 10.10.11.63/24
nmcli c modify ens192 ipv4.method manual
nmcli con mod ens192 connection.autoconnect yes
echo "Setup IP ens224"
nmcli c modify ens224 ipv4.addresses 10.10.12.63/24
nmcli c modify ens224 ipv4.method manual
nmcli con mod ens224 connection.autoconnect yes
echo "Disable Firewall & SElinux"
sudo systemctl disable firewalld
sudo systemctl stop firewalld
sudo systemctl disable NetworkManager
sudo systemctl stop NetworkManager
sudo systemctl enable network
sudo systemctl start network
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
- Khai báo repo và cài đặt các package cho OpenStack Queens
echo '[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1' >> /etc/yum.repos.d/MariaDB.repo
yum install -y epel-release
yum update -y
yum install -y centos-release-openstack-queens \
open-vm-tools python2-PyMySQL vim telnet wget curl
yum install -y python-openstackclient openstack-selinux
yum upgrade -y
- Setup timezone về Ho_Chi_Minh
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Ho_Chi_Minh /etc/localtime
- Bổ sung cmd_log
curl -Lso- https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/cmdlog.sh | bash
- Option bổ sung: Cài đặt collector sidecar để đẩy log lên Graylog
yum install wget -y
wget https://raw.githubusercontent.com/nhanhoadocs/scripts/master/Utilities/graylog-collector-sidecar.sh
chmod +x graylog-collector-sidecar.sh
bash graylog-collector-sidecar.sh
Nhập IP management của Graylog Server và IP management của Server
- Reboot Server
init 6
Trong bài lab này sẽ sử dụng Node Controller làm NTPD Server
- Cài đặt package
yum install -y chrony
- Backup file cấu hình
mv /etc/chrony.{conf,conf.bk}
- Cấu hình cho chrony
cat << EOF >> /etc/chrony.conf
server 0.vn.pool.ntp.org iburst
allow 10.10.10.0/24
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
- Enable và start chronyd
systemctl start chronyd
systemctl enable chronyd
- Cài đặt package
yum install -y chrony
- Backup file cấu hình
mv /etc/chrony.{conf,conf.bk}
- Cấu hình cho chrony
cat << EOF >> /etc/chrony.conf
server 10.10.10.61 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
- Enable và start chronyd
systemctl start chronyd
systemctl enable chronyd
- Cài đặt package
yum install -y chrony
- Backup file cấu hình
mv /etc/chrony.{conf,conf.bk}
- Cấu hình cho chrony
cat << EOF >> /etc/chrony.conf
server 10.10.10.61 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
- Enable và start chronyd
systemctl start chronyd
systemctl enable chronyd
- Kiểm tra đồng bộ thời gian từ NTPD-Server tương tự như trên Compute1
- Cài đặt package
yum install mariadb mariadb-server -y
- Thêm config của mysql cho openstack
cat << EOF >> /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.10.10.61
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
- Enable và start MySQL
systemctl enable mariadb.service
systemctl start mariadb.service
- Cài đặt passwd cho MySQL
# Run mysql_secure_installation non-interactively.
# NOTE(review): the first prompt asks for the CURRENT root password, which is
# empty on a fresh MariaDB install — feed a blank line first; otherwise the
# "y" below is consumed as a (wrong) current password and the tool aborts.
# Answers, in order: set root password (y), new password twice, remove
# anonymous users, disallow remote root, remove test DB, reload privileges.
mysql_secure_installation <<EOF

y
passla123
passla123
y
y
y
y
EOF
- Cài đặt package
yum install rabbitmq-server -y
- Cấu hình cho rabbitmq-server
rabbitmq-plugins enable rabbitmq_management
systemctl restart rabbitmq-server
curl -O http://localhost:15672/cli/rabbitmqadmin
chmod a+x rabbitmqadmin
mv rabbitmqadmin /usr/sbin/
rabbitmqadmin list users
- Enable và start rabbitmq-server
systemctl start rabbitmq-server
systemctl enable rabbitmq-server
systemctl status rabbitmq-server
- Tạo user và gán quyền
rabbitmqctl add_user openstack passla123
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
systemctl enable rabbitmq-server.service
rabbitmqctl set_user_tags openstack administrator
- Kiểm tra user vừa tạo
rabbitmqadmin list users
- Đăng nhập vào Dashboard quản trị của Rabbit-mq
http://10.10.10.61:15672
user: openstack
password: passla123
- Cài đặt package
yum install memcached python-memcached -y
- Cấu hình cho memcached
sed -i "s/-l 127.0.0.1,::1/-l 10.10.10.61/g" /etc/sysconfig/memcached
- Enable và start memcached
systemctl enable memcached.service
systemctl start memcached.service
- Đăng nhập vào MySQL
mysql -uroot -p
- Create DB và User cho Keystone
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'passla123';
FLUSH PRIVILEGES;
exit;
- Cài đặt package
yum install -y openstack-keystone httpd mod_wsgi
- Backup cấu hình
mv /etc/keystone/keystone.{conf,conf.bk}
- Cấu hình cho Keystone
cat << EOF >> /etc/keystone/keystone.conf
[DEFAULT]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:[email protected]/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
#rabbit_retry_interval = 1
#rabbit_retry_backoff = 2
#amqp_durable_queues = true
#rabbit_ha_queues = true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
EOF
- Phân quyền lại config file
chown root:keystone /etc/keystone/keystone.conf
- Đồng bộ database cho keystone
su -s /bin/sh -c "keystone-manage db_sync" keystone
- Thiết lập Fernet key
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
- Thiết lập boostrap cho Keystone
keystone-manage bootstrap --bootstrap-password passla123 \
--bootstrap-admin-url http://10.10.10.61:5000/v3/ \
--bootstrap-internal-url http://10.10.10.61:5000/v3/ \
--bootstrap-public-url http://10.10.10.61:5000/v3/ \
--bootstrap-region-id RegionOne
- Cấu hình apache cho keystone
sed -i 's|#ServerName www.example.com:80|ServerName 10.10.10.61|g' /etc/httpd/conf/httpd.conf
- Create symlink cho keystone api
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
ls /etc/httpd/conf.d/
- Start & Enable apache
systemctl enable httpd.service
systemctl restart httpd.service
systemctl status httpd.service
- Tạo file biến môi trường admin-openrc cho tài khoản quản trị
# Write the admin credentials file (admin-openrc).
# Fix: the OS_REGION_NAME line had a duplicated "export" keyword
# ("export export ..."), which corrupts the generated rc file.
cat << EOF >> admin-openrc
export OS_REGION_NAME=RegionOne
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=passla123
export OS_AUTH_URL=http://10.10.10.61:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export PS1='[\u@\h \W(admin-openrc-r1)]\$ '
EOF
- Tạo file biến môi trường demo-openrc cho tài khoản demo
# Write the demo user credentials file (demo-openrc).
# Fix: the OS_REGION_NAME line had a duplicated "export" keyword
# ("export export ..."), which corrupts the generated rc file.
cat << EOF >> demo-openrc
export OS_REGION_NAME=RegionOne
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=passla123
export OS_AUTH_URL=http://10.10.10.61:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export PS1='[\u@\h \W(demo-openrc-r1)]\$ '
EOF
- Sử dụng biến môi trường
source admin-openrc
- Tạo PJ Service
openstack project create --domain default --description "Service Project" service
- Tạo PJ demo
openstack project create --domain default --description "Demo Project" demo
- Tạo User demo và password
openstack user create --domain default --password passla123 demo
- Tạo roles user
openstack role create user
- Thêm roles user trên PJ demo
openstack role add --project demo --user demo user
- Unset các biến môi trường
unset OS_AUTH_URL OS_PASSWORD
- Kiểm tra xác thực trên Project admin
openstack --os-auth-url http://10.10.10.61:5000/v3 --os-project-domain-name Default \
--os-user-domain-name Default --os-project-name admin --os-username admin token issue
- Kiểm tra xác thực trên Project demo
openstack --os-auth-url http://10.10.10.61:5000/v3 --os-project-domain-name default \
--os-user-domain-name default --os-project-name demo --os-username demo token issue
- Sau khi kiểm tra xác thực xong source lại biến môi trường
source admin-openrc
- Nếu trong quá trình thao tác, xác thực token có vấn đề, get lại token mới
openstack token issue
- Đăng nhập vào MySQL
mysql -uroot -p
- Create DB và User cho Glance
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'passla123';
FLUSH PRIVILEGES;
exit;
- Sử dụng biến môi trường
source admin-openrc
- Tạo user glance
openstack user create --domain default --password passla123 glance
- Thêm roles admin cho user glance trên project service
openstack role add --project service --user glance admin
- Kiểm tra lại user glance
openstack role list --user glance --project service
- Khởi tạo dịch vụ glance
openstack service create --name glance --description "OpenStack Image" image
- Tạo các endpoint cho glance
openstack endpoint create --region RegionOne image public http://10.10.10.61:9292
openstack endpoint create --region RegionOne image internal http://10.10.10.61:9292
openstack endpoint create --region RegionOne image admin http://10.10.10.61:9292
- Cài đặt package
yum install -y openstack-glance
- Backup cấu hình glance-api
mv /etc/glance/glance-api.{conf,conf.bk}
- Cấu hình glance-api
cat << EOF >> /etc/glance/glance-api.conf
[DEFAULT]
bind_host = 10.10.10.61
registry_host = 10.10.10.61
[cors]
[database]
connection = mysql+pymysql://glance:[email protected]/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:5000
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = passla123
region_name = RegionOne
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
#rabbit_ha_queues = true
#rabbit_retry_interval = 1
#rabbit_retry_backoff = 2
#amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
EOF
- Phân quyền lại file cấu hình
chown root:glance /etc/glance/glance-api.conf
- Backup cấu hình glance-registry
mv /etc/glance/glance-registry.{conf,conf.bk}
- Cấu hình glance-registry
# Write the glance-registry configuration.
# Fix: memcached_servers was missing the port — keystonemiddleware expects
# host:port entries; use 11211 to match the glance-api config above.
cat << EOF >> /etc/glance/glance-registry.conf
[DEFAULT]
bind_host = 10.10.10.61
[database]
connection = mysql+pymysql://glance:[email protected]/glance
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:5000
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = passla123
region_name = RegionOne
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
#rabbit_ha_queues = true
#rabbit_retry_interval = 1
#rabbit_retry_backoff = 2
#amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
EOF
- Phân quyền lại file cấu hình
chown root:glance /etc/glance/glance-registry.conf
- Đồng bộ database cho glance
su -s /bin/sh -c "glance-manage db_sync" glance
- Enable và restart Glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
- Download image cirros
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
- Upload image lên Glance
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img \
--disk-format qcow2 --container-format bare --public
- Kiểm tra images
openstack image list
- Đăng nhập vào MySQL
mysql -u root -p
- Create DB và User cho Nova
CREATE DATABASE nova;
CREATE DATABASE nova_api;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'passla123';
FLUSH PRIVILEGES;
exit;
- Sử dụng biến môi trường
source admin-openrc
- Tạo user nova
openstack user create --domain default --password passla123 nova
- Thêm role admin cho user nova trên project service
openstack role add --project service --user nova admin
- Tạo dịch vụ nova
openstack service create --name nova --description "OpenStack Compute" compute
- Tạo các endpoint cho dịch vụ compute
openstack endpoint create --region RegionOne compute public http://10.10.10.61:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://10.10.10.61:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://10.10.10.61:8774/v2.1
- Tạo user placement
openstack user create --domain default --password passla123 placement
- Thêm role admin cho user placement trên project service
openstack role add --project service --user placement admin
- Tạo dịch vụ placement
openstack service create --name placement --description "Placement API" placement
- Tạo endpoint cho placement
openstack endpoint create --region RegionOne placement public http://10.10.10.61:8778
openstack endpoint create --region RegionOne placement internal http://10.10.10.61:8778
openstack endpoint create --region RegionOne placement admin http://10.10.10.61:8778
- Cài đặt package
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console \
openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
- Backup cấu hình nova
mv /etc/nova/nova.{conf,conf.bk}
- Cấu hình cho nova
cat << EOF >> /etc/nova/nova.conf
[DEFAULT]
my_ip = 10.10.10.61
enabled_apis = osapi_compute,metadata
use_neutron = True
osapi_compute_listen=10.10.10.61
metadata_host=10.10.10.61
metadata_listen=10.10.10.61
metadata_listen_port=8775
firewall_driver = nova.virt.firewall.NoopFirewallDriver
allow_resize_to_same_host=True
notify_on_state_change = vm_and_task_state
transport_url = rabbit://openstack:[email protected]:5672
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:[email protected]/nova_api
[barbican]
[cache]
backend = oslo_cache.memcache_pool
enabled = true
memcache_servers = 10.10.10.61:11211
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:[email protected]/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.10.10.61:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://10.10.10.61:5000/v3
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = passla123
region_name = RegionOne
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
region_name = RegionOne
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_ha_queues = true
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.10.10.61:5000/v3
username = placement
password = passla123
os_region_name = RegionOne
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
novncproxy_host=10.10.10.61
enabled = true
vncserver_listen = 10.10.10.61
vncserver_proxyclient_address = 10.10.10.61
novncproxy_base_url = http://10.10.10.61:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
EOF
- Phân quyền lại file config nova
chown root:nova /etc/nova/nova.conf
- Backup cấu hình nova placement
cp /etc/httpd/conf.d/00-nova-placement-api.{conf,conf.bk}
- Cấu hình virtualhost cho nova placement
cat << 'EOF' >> /etc/httpd/conf.d/00-nova-placement-api.conf
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
EOF
- Cấu hình bind cho nova placement api trên httpd
sed -i -e 's/VirtualHost \*/VirtualHost 10.10.10.61/g' /etc/httpd/conf.d/00-nova-placement-api.conf
sed -i -e 's/Listen 8778/Listen 10.10.10.61:8778/g' /etc/httpd/conf.d/00-nova-placement-api.conf
- Restart httpd
systemctl restart httpd
- Import DB nova
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
Bước import nova database bỏ qua hết các Warning
use_tpool
...
- Check nova cell
nova-manage cell_v2 list_cells
- Enable và start service nova
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
- Kiểm tra cài đặt lại dịch vụ
openstack compute service list
- Cài đặt package
yum install -y openstack-nova-compute libvirt-client
- Backup cấu hình nova
mv /etc/nova/nova.{conf,conf.bk}
- Cấu hình Nova
cat << EOF >> /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:[email protected]:5672
my_ip = 10.10.10.62
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.10.10.61:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://10.10.10.61:5000/v3
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = passla123
region_name = RegionOne
[libvirt]
# egrep -c '(vmx|svm)' /proc/cpuinfo = 0
virt_type = qemu
#virt_type = kvm
#cpu_mode = host-passthrough
#hw_disk_discard = unmap
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_ha_queues = true
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.10.10.61:5000/v3
username = placement
password = passla123
os_region_name = RegionOne
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = 10.10.10.62
novncproxy_base_url = http://10.10.10.61:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
EOF
- Phân quyền lại file config
chown root:nova /etc/nova/nova.conf
- Enable và start service
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
- Cài đặt package
yum install -y openstack-nova-compute libvirt-client
- Backup cấu hình nova
mv /etc/nova/nova.{conf,conf.bk}
- Cấu hình Nova
cat << EOF >> /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:[email protected]:5672
my_ip = 10.10.10.63
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.10.10.61:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://10.10.10.61:5000/v3
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = passla123
region_name = RegionOne
[libvirt]
# egrep -c '(vmx|svm)' /proc/cpuinfo = 0
virt_type = qemu
#virt_type = kvm
#cpu_mode = host-passthrough
#hw_disk_discard = unmap
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_ha_queues = true
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.10.10.61:5000/v3
username = placement
password = passla123
os_region_name = RegionOne
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = 10.10.10.63
novncproxy_base_url = http://10.10.10.61:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
EOF
- Phân quyền lại file config
chown root:nova /etc/nova/nova.conf
- Enable và start service
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
- Sử dụng biến môi trường
source admin-openrc
- Kiểm tra nova-compute
openstack compute service list
- Đăng nhập MySQL
mysql -uroot -p
- Create DB và User cho Neutron
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'passla123';
FLUSH PRIVILEGES;
exit;
- Sử dụng biến môi trường
source admin-openrc
- Tạo user neutron
openstack user create --domain default --password passla123 neutron
- Gán role admin cho user neutron
openstack role add --project service --user neutron admin
- Khởi tạo dịch vụ neutron
openstack service create --name neutron --description "OpenStack Networking" network
- Tạo các endpoint cho neutron
openstack endpoint create --region RegionOne network public http://10.10.10.61:9696
openstack endpoint create --region RegionOne network internal http://10.10.10.61:9696
openstack endpoint create --region RegionOne network admin http://10.10.10.61:9696
- Cài đặt package
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
Lưu ý: Mô hình này sử dụng mô hình mạng provider (flat) sử dụng linuxbridge DHCP agent và metadata agent được chạy trên node compute
- Backup cấu hình neutron
mv /etc/neutron/neutron.{conf,conf.bk}
- Cấu hình neutron
cat << EOF >> /etc/neutron/neutron.conf
[DEFAULT]
bind_host = 10.10.10.61
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
allow_overlapping_ips = True
dhcp_agents_per_network = 2
[agent]
[cors]
[database]
connection = mysql+pymysql://neutron:[email protected]/neutron
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:35357
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
region_name = RegionOne
[matchmaker_redis]
[nova]
auth_url = http://10.10.10.61:35357
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = passla123
region_name = RegionOne
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues = true
rabbit_ha_queues = true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/neutron.conf
Cấu hình ml2_config
- Backup config ml2_config
mv /etc/neutron/plugins/ml2/ml2_conf.{ini,ini.bk}
- Cấu hình ml2_config
cat << EOF >> /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
# mechanism_drivers = linuxbridge,l2population
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
# network_vlan_ranges = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
EOF
- Phân quyền lại file ml2_config
chown root:neutron /etc/neutron/plugins/ml2/ml2_conf.ini
Cấu hình linuxbridge_agent
- Backup lại config linuxbridge_agent
# Back up the linuxbridge_agent config.
# Fix: brace-expansion typo "init.bk" -> "ini.bk" (every other backup in this
# guide uses the ".bk" suffix on the original extension, cf. the compute node).
mv /etc/neutron/plugins/ml2/linuxbridge_agent.{ini,ini.bk}
- Cấu hình cho linuxbridge_agent
cat << EOF >> /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:ens192
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
# enable_vxlan = true
## network dataVM
local_ip = 10.10.11.61
# l2_population = true
EOF
- Phân quyền lại file linuxbridge_agent
chown root:neutron /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Cấu hình trên file l3_agent
- Backup cấu hình l3_agent
mv /etc/neutron/l3_agent.{ini,ini.bk}
- Cấu hình l3_agent
cat << EOF >> /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
[agent]
[ovs]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/l3_agent.ini
Bổ sung cấu hình cho phép nova service trên controller sử dụng networking service
- Chỉnh sửa bổ sung cấu hình phần [neutron] trong file /etc/nova/nova.conf
[neutron]
url = http://10.10.10.61:9696
auth_url = http://10.10.10.61:35357
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
service_metadata_proxy = true
metadata_proxy_shared_secret = passla123
region_name = RegionOne
Các Networking service initialization script yêu cầu symbolic link /etc/neutron/plugin.ini
tới ML2 plug-in config file /etc/neutron/plugins/ml2/ml2_conf.ini
- Khởi tạo symlink cho ml2_config
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
- Đồng bộ database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
- Khởi động lại Compute API service:
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \
openstack-nova-consoleauth.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
- Enable và restart Neutron service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service \
neutron-l3-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service \
neutron-l3-agent.service
- Cài đặt các package
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
Cấu hình neutron
- Backup neutron config
mv /etc/neutron/neutron.{conf,conf.bk}
- Cấu hình neutron
# Write a fresh neutron.conf (the original was moved aside as a backup above).
# '>' instead of '>>' keeps the step idempotent — re-running would otherwise
# append a duplicate copy of every section. The quoted 'EOF' delimiter stops
# the shell from expanding `$` or backticks inside the config content
# (important if a password ever contains such characters).
cat << 'EOF' > /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]:5672
auth_strategy = keystone
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:35357
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
region_name = RegionOne
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_ha_queues = true
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
EOF
- Phân quyền lại file neutron config
chown root:neutron /etc/neutron/neutron.conf
Cấu hình linuxbridge_agent
Linux bridge agent xây dựng layer-2 (bridging và switching) virtual networking infrastructure cho instances và xử lý các security group.
- Backup linuxbridge_agent config
mv /etc/neutron/plugins/ml2/linuxbridge_agent.{ini,ini.bk}
- Config linuxbridge_agent
# Write a fresh linuxbridge_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content.
cat << 'EOF' > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:ens224
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
# enable_vxlan = true
## network dataVM
local_ip = 10.10.11.62
# l2_population = true
EOF
- Phân quyền lại cho file linuxbridge_agent config
chown root:neutron /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Cấu hình DHCP agent
- Backup cấu hình dhcp_agent
mv /etc/neutron/dhcp_agent.{ini,ini.bk}
- Cấu hình dhcp_agent
# Write a fresh dhcp_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content.
cat << 'EOF' > /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
force_metadata = True
[agent]
[ovs]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/dhcp_agent.ini
Cấu hình metadata_agent
- Backup file cấu hình
mv /etc/neutron/metadata_agent.{ini,ini.bk}
- Chỉnh sửa config
# Write a fresh metadata_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content (the shared secret
# would be silently mangled if it ever contained '$').
cat << 'EOF' > /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = 10.10.10.61
metadata_proxy_shared_secret = passla123
[agent]
[cache]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/metadata_agent.ini
Để nova services trên node compute có thể sử dụng networking service thì chúng ta bổ sung thêm cấu hình cho nova.conf như sau
- Bổ sung cấu hình phần
[neutron]
trong /etc/nova/nova.conf
[neutron]
url = http://10.10.10.61:9696
auth_url = http://10.10.10.61:35357
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
region_name = RegionOne
- Restart lại Compute service
systemctl restart openstack-nova-compute.service libvirtd.service
- Enable và start linuxbridge_agent dhcp_agent metadata_agent
# Enable at boot, then start, the linuxbridge, DHCP and metadata agents.
systemctl enable neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller ~(admin-openrc)]$ source admin-openrc
[root@controller ~(admin-openrc)]$ openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 314bb32d-bd5b-4f84-833e-40efada2634b | Linux bridge agent | compute01 | None | :-) | UP | neutron-linuxbridge-agent |
| 47074bb4-d9eb-4781-894a-4af6fcfcf97a | DHCP agent | compute01 | nova | :-) | UP | neutron-dhcp-agent |
| 972fb008-d6a7-45a1-8769-72247b76229b | Linux bridge agent | controller | None | :-) | UP | neutron-linuxbridge-agent |
| a412c91c-5933-4277-bef6-618b93c6fd5d | Metadata agent | compute01 | None | :-) | UP | neutron-metadata-agent |
| cba1872d-b46f-4857-bff6-fd9ffeca0fed | L3 agent | controller | nova | :-) | UP | neutron-l3-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
- Cài đặt các package
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
Cấu hình neutron
- Backup neutron config
mv /etc/neutron/neutron.{conf,conf.bk}
- Cấu hình neutron
# Write a fresh neutron.conf (the original was moved aside as a backup above).
# '>' instead of '>>' keeps the step idempotent — re-running would otherwise
# append a duplicate copy of every section. The quoted 'EOF' delimiter stops
# the shell from expanding `$` or backticks inside the config content.
cat << 'EOF' > /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]:5672
auth_strategy = keystone
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:35357
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
region_name = RegionOne
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
rabbit_ha_queues = true
rabbit_retry_interval = 1
rabbit_retry_backoff = 2
amqp_durable_queues= true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
EOF
- Phân quyền lại file neutron config
chown root:neutron /etc/neutron/neutron.conf
Cấu hình linuxbridge_agent
- Backup linuxbridge_agent config
mv /etc/neutron/plugins/ml2/linuxbridge_agent.{ini,ini.bk}
- Config linuxbridge_agent
# Write a fresh linuxbridge_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content.
cat << 'EOF' > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:ens224
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
# enable_vxlan = true
## network dataVM
local_ip = 10.10.11.63
# l2_population = true
EOF
- Phân quyền lại cho file linuxbridge_agent config
chown root:neutron /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Cấu hình DHCP agent
- Backup cấu hình dhcp_agent
mv /etc/neutron/dhcp_agent.{ini,ini.bk}
- Cấu hình dhcp_agent
# Write a fresh dhcp_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content.
cat << 'EOF' > /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
force_metadata = True
[agent]
[ovs]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/dhcp_agent.ini
Cấu hình metadata_agent
- Backup file cấu hình
mv /etc/neutron/metadata_agent.{ini,ini.bk}
- Chỉnh sửa config
# Write a fresh metadata_agent.ini (the original was backed up above).
# '>' keeps the step idempotent on re-run; the quoted 'EOF' delimiter
# prevents shell expansion inside the config content.
cat << 'EOF' > /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = 10.10.10.61
metadata_proxy_shared_secret = passla123
[agent]
[cache]
EOF
- Phân quyền lại file config
chown root:neutron /etc/neutron/metadata_agent.ini
Để nova services trên node compute có thể sử dụng networking service thì chúng ta bổ sung thêm cấu hình cho nova.conf như sau
- Bổ sung cấu hình phần
[neutron]
trong /etc/nova/nova.conf
[neutron]
url = http://10.10.10.61:9696
auth_url = http://10.10.10.61:35357
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = passla123
region_name = RegionOne
- Restart lại Compute service
systemctl restart openstack-nova-compute.service libvirtd.service
- Enable và start linuxbridge_agent dhcp_agent metadata_agent
# Enable at boot, then start, the linuxbridge, DHCP and metadata agents.
systemctl enable neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller ~(admin-openrc)]$ source admin-openrc
[root@controller ~(admin-openrc)]$ openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 314bb32d-bd5b-4f84-833e-a263440efadb | Linux bridge agent | compute01 | None | :-) | UP | neutron-linuxbridge-agent |
| 314bb32d-bd5b-4f84-833e-42634b0efada | Linux bridge agent | compute02 | None | :-) | UP | neutron-linuxbridge-agent |
| 47074bb4-d9eb-4781-894a-4af97f6fcfca | DHCP agent | compute01 | nova | :-) | UP | neutron-dhcp-agent |
| 47074bb4-d9eb-4781-894a-4afcf97af6fc | DHCP agent | compute02 | nova | :-) | UP | neutron-dhcp-agent |
| 972fb008-d6a7-45a1-8769-72247b76229b | Linux bridge agent | controller | None | :-) | UP | neutron-linuxbridge-agent |
| a412c91c-5933-4277-bef6-618b93c6fd5d | Metadata agent | compute01 | None | :-) | UP | neutron-metadata-agent |
| a412c91c-5933-4277-bef6-61c6fd5d8b93 | Metadata agent | compute02 | None | :-) | UP | neutron-metadata-agent |
| cba1872d-b46f-4857-bff6-fd9ffeca0fed | L3 agent | controller | nova | :-) | UP | neutron-l3-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
Chuẩn bị: Add thêm 1 ổ /dev/sdb
tối thiểu 100G vào Node Controller
- Kiểm tra ổ
[root@controller ~(admin-openrc)]$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
sda 252:0 0 50G 0 disk
├─sda1 252:1 0 4G 0 part /boot
├─sda2 252:2 0 38G 0 part
│ └─VolGroup00-LogVol01 253:0 0 38G 0 lvm /
└─sda3 252:3 0 8G 0 part [SWAP]
sdb 252:16 0 100G 0 disk
[root@controller ~(admin-openrc)]$
- Đăng nhập MySQL
mysql -u root -p
- Create DB và User cho Cinder
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'passla123';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'passla123';
exit
- Sử dụng biến môi trường
source admin-openrc
- Tạo cinder user:
openstack user create --domain default --password passla123 cinder
- Tạo admin role cho cinder user
openstack role add --project service --user cinder admin
- Tạo đối tượng cinderv2 và cinderv3 service:
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
- Tạo Block Storage service API endpoints:
openstack endpoint create --region RegionOne volumev2 \
public http://10.10.10.61:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 \
internal http://10.10.10.61:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 \
admin http://10.10.10.61:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 \
public http://10.10.10.61:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 \
internal http://10.10.10.61:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 \
admin http://10.10.10.61:8776/v3/%\(project_id\)s
- Cài đặt packages
yum install -y lvm2 device-mapper-persistent-data openstack-cinder targetcli
- Enable và start LVM metadata service
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
- Chỉnh sửa file
/etc/lvm/lvm.conf
bổ sung filter cho sdb
# Line 142
filter = [ "a/sdb/", "r/.*/"]
- Tạo LVM volume /dev/sdb:
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
Cấu hình Cinder
- Backup cấu hình cinder
mv /etc/cinder/cinder.{conf,conf.bk}
- Cấu hình cho Cinder
# Write a fresh cinder.conf (the original was moved aside as a backup above).
# '>' instead of '>>' keeps the step idempotent — re-running would otherwise
# append a duplicate copy of every section. The quoted 'EOF' delimiter stops
# the shell from expanding `$` or backticks inside the config content.
cat << 'EOF' > /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 10.10.10.61
enabled_backends = lvm
glance_api_servers = http://10.10.10.61:9292
#rpc_backend = rabbit
#control_exchange = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:[email protected]/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://10.10.10.61:5000
auth_url = http://10.10.10.61:35357
memcached_servers = 10.10.10.61:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = passla123
region_name = RegionOne
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
#driver = messagingv2
[oslo_messaging_rabbit]
#rabbit_retry_interval = 1
#rabbit_retry_backoff = 2
#amqp_durable_queues = true
#rabbit_ha_queues = true
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
EOF
- Phân quyền lại cho file cấu hình
chown root:cinder /etc/cinder/cinder.conf
- Đồng bộ database cho Cinder
su -s /bin/sh -c "cinder-manage db sync" cinder
- Restart lại nova service
systemctl restart openstack-nova-api.service
Enable và start Block Storage services
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
Kiểm tra
[root@controller ~(admin-openrc)]$ cinder service-list
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller | nova | enabled | up | 2019-05-06T07:20:02.000000 | - |
| cinder-volume | controller@lvm | nova | enabled | up | 2019-05-02T07:42:52.000000 | - |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
- Cài đặt packages
yum install -y openstack-dashboard
- Tạo file direct
# Create a small page at the web root that redirects "/" to the dashboard.
filehtml=/var/www/html/index.html
# Quote the expansion so the path survives unexpected IFS/glob settings;
# 'cat >' below also creates the file, so touch is belt-and-braces only.
touch "$filehtml"
# '>' instead of '>>' keeps the step idempotent (re-running would append a
# second HTML document); the quoted 'EOF' delimiter writes the markup
# verbatim, with no shell expansion.
cat << 'EOF' > "$filehtml"
<html>
<head>
<META HTTP-EQUIV="Refresh" Content="0.5; URL=http://10.10.10.61/dashboard">
</head>
<body>
<center> <h1>Redirecting to OpenStack Dashboard</h1> </center>
</body>
</html>
EOF
- Backup file cấu hình
cp /etc/openstack-dashboard/{local_settings,local_settings.bk}
- Chỉnh sửa cấu hình
vi /etc/openstack-dashboard/local_settings
# line 38
ALLOWED_HOSTS = ['*',]
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# line 171 add
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': ['10.10.10.61:11211',]
}
}
#line 205
OPENSTACK_HOST = "10.10.10.61"
OPENSTACK_KEYSTONE_URL = "http://10.10.10.61:5000/v3"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
#line 481
TIME_ZONE = "Asia/Ho_Chi_Minh"
- Thêm config httpd cho dashboard
echo "WSGIApplicationGroup %{GLOBAL}" >> /etc/httpd/conf.d/openstack-dashboard.conf
- Restart service httpd và memcached
systemctl restart httpd.service memcached.service
http://10.10.10.61
user: admin
password: passla123
Truy cập Admin
--> Network
--> Networks
Chọn Create Network
Truy cập Admin
--> Compute
--> Flavors
Chọn Create Flavor
Truy cập Projects
--> Compute
--> Instance
Chọn Launch Instance
VM tạo thành công nhưng chưa ping được ra ngoài, SSH ngoài vào ko OK
Truy cập Projects
--> Network
--> Security Groups
Chọn Manage Rules
Kiểm tra lại VM ok