Deployment of OpenStack Stein based on ARM

Posted by redsox8185 on Wed, 02 Mar 2022 00:47:54 +0100

OpenStack Stein

Basic setup

1, Basic configuration

Turn off the firewall and selinux (all nodes)
systemctl  stop  firewalld.service
systemctl disable firewalld
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce  0
Passwordless SSH login (control node)
ssh-keygen
ssh-copy-id  192.168.16.82
ssh-copy-id  192.168.16.83
Configure yum source (all nodes)
yum -y install centos-release-openstack-stein
DNS configuration (all nodes)
1. Hosts file resolution
vim /etc/hosts

192.168.16.81 controller
192.168.16.82 compute01
192.168.16.83 compute02

scp /etc/hosts 192.168.16.82:/etc/
scp /etc/hosts 192.168.16.83:/etc/
2. Modify host name
On 192.168.16.81, run: hostnamectl set-hostname controller
On 192.168.16.82, run: hostnamectl set-hostname compute01
On 192.168.16.83, run: hostnamectl set-hostname compute02
NTP configuration
1. Control node
yum -y install chrony
timedatectl set-timezone Asia/Shanghai

vim /etc/chrony.conf
#Add the following contents
allow 192.168.16.0/24
local stratum 10
#Comment out the existing lines beginning with "server", then add:
server ntp.aliyun.com iburst

systemctl enable chronyd.service
systemctl start chronyd.service

timedatectl set-ntp yes
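
#Optionally verify on the control node that chrony is synchronizing against the configured upstream (ntp.aliyun.com should appear as a source):
chronyc sources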
2. Compute nodes
yum -y install chrony

vim /etc/chrony.conf
#Add "server controller iburst" and delete or comment out the options of other servers
server controller iburst

systemctl enable chronyd.service
systemctl restart chronyd.service

chronyc sources

ntpdate -u 192.168.16.81  #Manual synchronization time
System upgrade (all nodes)
1. Disable kernel upgrade
sed -i '$a exclude=kernel*' /etc/yum.conf
sed -i '$a exclude=kernel centos-release' /etc/yum.conf
2. Upgrade system software
yum -y update
Install client (all nodes)
yum -y install python-openstackclient

2, Basic services

Install database (control node)
  • Control node
yum -y install mariadb mariadb-server python2-PyMySQL

vim /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 192.168.16.81
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
#bind-address is the control node's management IP

#Passwordless login for the mysql client
[client]
user = root
password = hf3366++

systemctl enable mariadb.service
systemctl start mariadb.service

mysql_secure_installation   #Initialize the database
#Press Enter (no current root password), then answer: y (set the root password), y, n, n, y

vim /usr/lib/systemd/system/mariadb.service
#Add the following under the [Service] section
LimitNOFILE=65535
LimitNPROC=65535


  • All nodes
#Add the following to the configuration files
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536

vim /etc/pam.d/login
session required /lib64/security/pam_limits.so

vim /etc/sysctl.conf
fs.file-max = 65536
Then run: sysctl -p
  • Control node
systemctl daemon-reload
systemctl restart mariadb.service

mysql -u root -p
show variables like 'max_connections';  #Query maximum connections
show global status like 'Max_used_connections';  #Query current connections
Install message queuing (control node)
yum -y install rabbitmq-server

systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

#Creating users and authorizations
rabbitmqctl add_user openstack hf3366++
rabbitmqctl set_user_tags openstack administrator
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmqctl list_user_permissions openstack   #View permissions

#Configure browser access
rabbitmq-plugins enable rabbitmq_management
http://192.168.16.81:15672/

#Modify rabbitmq default parameters
vim /usr/lib/systemd/system/rabbitmq-server.service
#Add the following under the [Service] section
[Service]
LimitNOFILE=16384

systemctl daemon-reload
systemctl restart rabbitmq-server
Install memcached (control node)
yum -y install memcached python-memcached

vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"
#Add "controller" after 127.0.0.1,:: 1 to configure the service to use the management IP address of the controller node
Install etcd (control node)
yum -y install etcd

cp /etc/etcd/etcd.conf{,.bak}
vim /etc/etcd/etcd.conf
#Modify the following 9 parameters and comment out all the rest.
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.16.81:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.16.81:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.16.81:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.16.81:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.16.81:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

systemctl enable etcd
systemctl start etcd

Component installation

1, Keystone installation (control node)

1. Library creation authorization
mysql -u root -p

CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
2. Installation
yum -y install openstack-keystone httpd mod_wsgi
3. Modify the configuration
cp /etc/keystone/keystone.conf{,.bak}

vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:hf3366++@controller/keystone
[token]
provider = fernet
4. Populate the database
su -s /bin/sh -c "keystone-manage db_sync" keystone
5. Initialize the Fernet keystore
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

#verification
ls /etc/keystone/ | egrep "fernet-key"
tree /etc/keystone/fernet-keys/
6. Bootstrap the Identity service
keystone-manage bootstrap --bootstrap-password hf3366++  --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
Configuring Apache HTTP services
1. Edit the "/ etc/httpd/conf/httpd.conf" file and configure the * * "ServerName" * * option as the control node
vim /etc/httpd/conf/httpd.conf
ServerName controller
#This line is commented out by default; uncomment it and set it to controller
2. Create a link to the "/usr/share/keystone/wsgi-keystone.conf" file
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

3. Configure self start

systemctl enable httpd.service
systemctl start httpd.service

4. Create environment variable script

vim admin-openrc

export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=hf3366++
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
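
#As a quick sanity check (assuming the admin-openrc file above is in the current directory), load the variables and request a token from Keystone:
source admin-openrc
openstack token issue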
Create domains, projects, users, and roles
1. Create
1.Create a new domain
openstack domain create --description "An Example Domain" example
2.Create the service project
openstack project create --domain default --description "Service Project" service
3.Create a normal project myproject
openstack project create --domain default --description "Demo Project" myproject
4.Create an ordinary user myuser
openstack user create --domain default --password hf3366++ myuser
5.Create role
openstack role create myrole
6.Add the myrole role to the myproject project and myuser user
openstack role add --project myproject --user myuser myrole
2. View
openstack user list
openstack role list
openstack project list
openstack role assignment list

2, Placement installation (control node)

1. Library creation authorization
mysql -u root -p

CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
2. Configure users and endpoints
1.Create user placement
openstack user create --domain default --password hf3366++ placement
2.Add the admin role to the placement user and create the Placement service entity
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
3.Create the Placement API service endpoints
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
3. Installation configuration
yum -y install openstack-placement-api

cp /etc/placement/placement.conf{,.bak}
vim /etc/placement/placement.conf

[placement_database]
connection = mysql+pymysql://placement:hf3366++@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = placement
password = hf3366++
4. Modify the configuration file "/etc/httpd/conf.d/00-placement-api.conf"
vim /etc/httpd/conf.d/00-placement-api.conf
  <Directory /usr/bin>
    <IfVersion >= 2.4>
      Require all granted
    </IfVersion>
    <IfVersion < 2.4>
      Order allow,deny
      Allow from all
    </IfVersion>
  </Directory>
#Add this block below the "#SSLCertificateKeyFile ..." line
5. Populate the database
su -s /bin/sh -c "placement-manage db sync" placement
mysql -e "use placement;show tables;" -u placement -p  #verification
6. Start
systemctl restart httpd
7. Check the execution status
placement-status upgrade check
8. Install pip
yum install -y epel-release
yum install -y python-pip
rm -rf /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel-testing.repo
9. Verify the Placement API with the following commands
1.Install the osc-placement plugin
pip install osc-placement
2.List the available resource classes and traits
openstack --os-placement-api-version 1.2 resource class list --sort-column name
openstack --os-placement-api-version 1.6 trait list --sort-column name

3, Glance installation (control node)

1. Library creation authorization
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
2. Create roles and users
1.Create the glance user
openstack user create --domain default --password hf3366++ glance
2.Add roles to projects and users
openstack role add --project service --user glance admin
3.Create service entity
openstack service create --name glance --description "OpenStack Image" image
4.Create the Image service API endpoints
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
3. Installation configuration
yum -y install openstack-glance

cp /etc/glance/glance-api.conf{,.bak}
vim /etc/glance/glance-api.conf

[database]
connection = mysql+pymysql://glance:hf3366++@controller/glance
[keystone_authtoken]
www_authenticate_uri  = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hf3366++
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

cp /etc/glance/glance-registry.conf{,.bak}
vim /etc/glance/glance-registry.conf

[database]
connection = mysql+pymysql://glance:hf3366++@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hf3366++
[paste_deploy]
flavor = keystone
4. Populate the database
su -s /bin/sh -c "glance-manage db_sync" glance
mysql glance -e "show tables;" -u glance -p  #verification
5. Start the Image service and configure it to start when the system boots
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
6. Verify glance
1.Download Image
wget https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
2.Upload image
openstack image create "cirros" --file cirros-0.4.0-aarch64-disk.img --disk-format qcow2 --container-format bare --public
3.verification
openstack image list

4, Nova installation (control node)

1. Library creation authorization
mysql -u root -p

CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'hf3366++';

FLUSH PRIVILEGES;
quit
2. Create roles and users
1.Create the nova user
openstack user create --domain default --password  hf3366++ nova
2.Add the admin role to the nova user
openstack role add --project service --user nova admin
3.Create the nova service entity
openstack service create --name nova --description "OpenStack Compute" compute
4.Create the Compute API service endpoints
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
3. Installation configuration
yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

cp /etc/nova/nova.conf{,.bak}
vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hf3366++@controller
my_ip = 192.168.16.81
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
allow_resize_to_same_host = true
[api_database]
connection = mysql+pymysql://nova:hf3366++@controller/nova_api
[database] 
connection = mysql+pymysql://nova:hf3366++@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hf3366++
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
novncproxy_host=0.0.0.0
novncproxy_port=6080
novncproxy_base_url=http://192.168.16.81:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = hf3366++
4. Populate the database
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

#verification
mysql nova_api -e "show tables;" -u nova -p
mysql nova -e "show tables;" -u nova -p
mysql nova_cell0 -e "show tables;" -u nova -p
5. Verify that cell0 and cell1 are registered correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
6. Start the compute service and set it to start when the system boots
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
7. Inspection
openstack compute service list

5, Neutron installation (control node)

1. Library creation authorization
mysql -u root -p

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'  IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'hf3366++';

FLUSH PRIVILEGES;
quit
2. Create roles and users
1.Create the neutron user
openstack user create --domain default --password hf3366++ neutron
2.Add the admin role to the neutron user
openstack role add --project service --user neutron admin
3.Create the neutron service entity
openstack service create --name neutron --description "OpenStack Networking" network
4.Create the Networking service API endpoints
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
3. Installation configuration
  • install
yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
  • Modify /etc/neutron/neutron.conf
cp /etc/neutron/neutron.conf{,.bak}
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:hf3366++@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = hf3366++
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
  • Modify /etc/neutron/plugins/ml2/ml2_conf.ini
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vlan]
network_vlan_ranges = provider
[securitygroup]
enable_ipset = true

#tenant_network_types is intentionally left empty (provider networks only, no self-service networks)
  • Modify /etc/neutron/plugins/ml2/linuxbridge_agent.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:enp125s0f1
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  • Modify /etc/neutron/dhcp_agent.ini
cp /etc/neutron/dhcp_agent.ini{,.bak}
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

  • Configure metadata proxy
cp /etc/neutron/metadata_agent.ini{,.bak}
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = hf3366++
4. Configure bridge filter
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

modprobe br_netfilter
sysctl -p
sed -i '$amodprobe br_netfilter' /etc/rc.local
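
#On CentOS 7, /etc/rc.local is only executed at boot if it is executable, so the modprobe line added above would otherwise be ignored; a commonly needed extra step:
chmod +x /etc/rc.d/rc.local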
5. Modify nova
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = hf3366++
service_metadata_proxy = true
metadata_proxy_shared_secret = hf3366++
6. Create network initialization
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
7. Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
mysql neutron -e "show tables;" -u neutron -p #verification
8. Restart Nova API
systemctl restart openstack-nova-api.service
9. Start the network service and configure it to start when the system boots
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

openstack network agent list #see

6, Nova installation (compute node)

Preliminary installation

1. Install QEMU
1.Install dependent packages
yum -y install glib2-devel zlib-devel pixman-devel librbd-devel libaio-devel

2.Download source code
wget https://download.qemu.org/qemu-4.0.0.tar.xz
3.Compile and install
tar -xvf qemu-4.0.0.tar.xz
cd qemu-4.0.0
./configure --enable-rbd --enable-linux-aio
make -j 50
make install -j 20

4.Add /usr/local/lib to the library path
sed -i '$ainclude /usr/local/lib' /etc/ld.so.conf
5.Make the configuration effective.
ldconfig
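
#To confirm the freshly built QEMU is picked up (assuming the default install prefix /usr/local), check the versions:
qemu-img --version
qemu-system-aarch64 --version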
2. Install libvirt
1.install edk2
wget https://www.kraxel.org/repos/firmware.repo -O /etc/yum.repos.d/firmware.repo
yum -y install edk2.git-aarch64

2.Install dependent packages.
yum -y install gnutls-devel libnl-devel libxml2-devel yajl-devel device-mapper-devel libpciaccess-devel

3.Download the source code.
wget https://libvirt.org/sources/libvirt-5.6.0-1.fc30.src.rpm -O /root/libvirt-5.6.0-1.fc30.src.rpm

4.compile
cd /root/
rpm -i libvirt-5.6.0-1.fc30.src.rpm

yum -y install libxml2-devel readline-devel ncurses-devel libtasn1-devel gnutls-devel libattr-devel libblkid-devel augeas systemd-devel libpciaccess-devel yajl-devel sanlock-devel libpcap-devel libnl3-devel libselinux-devel dnsmasq radvd cyrus-sasl-devel libacl-devel parted-devel device-mapper-devel xfsprogs-devel librados2-devel librbd1-devel glusterfs-api-devel glusterfs-devel numactl-devel libcap-ng-devel fuse-devel netcf-devel libcurl-devel audit-libs-devel systemtap-sdt-devel nfs-utils dbus-devel scrub numad qemu-img rpm-build

rpmbuild -ba ~/rpmbuild/SPECS/libvirt.spec

If there is an error, you can compile using another method:
rpmbuild --rebuild /root/libvirt-5.6.0-1.fc30.src.rpm

5.install
yum install -y /root/rpmbuild/RPMS/aarch64/*.rpm
6.Restart the libvirt service.
systemctl restart libvirtd
7.Modify the configuration file "/etc/libvirt/qemu.conf"
vim /etc/libvirt/qemu.conf
nvram = ["/usr/share/AAVMF/AAVMF_CODE.fd:/usr/share/AAVMF/AAVMF_VARS.fd","/usr/share/edk2.git/aarch64/QEMU_EFI-pflash.raw:/usr/share/edk2.git/aarch64/vars-template-pflash.raw"]
8.Check the libvirt and QEMU versions.
virsh version
3. Install Nova compute
1.Install components
yum -y install openstack-nova-compute
2.Edit the file /etc/nova/nova.conf
cp /etc/nova/nova.conf{,.bak}
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hf3366++@controller
my_ip = 192.168.16.82
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hf3366++
[libvirt]
virt_type = kvm
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = hf3366++
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
vncserver_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = hf3366++
4. Start the services and enable them to start at boot
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
5. Add the compute node to the cell database (run on the control node)
1.View the compute hosts in the database
openstack compute service list --service nova-compute
2.Discover the hosts
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
3.Auto discovery
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
4.restart
systemctl restart openstack-nova-api.service
6. Verification
1.List service components
openstack compute service list
2.List the API endpoints in the Identity service to verify connectivity with the Identity service.
openstack catalog list
3.List the images in the Glance service
openstack image list
4.Check that the cells and the Placement API are working properly and that other necessary prerequisites are in place
nova-status upgrade check

7, Neutron installation (compute node)

1. Installation
yum -y install openstack-neutron-linuxbridge ebtables ipset
2. Modify the configuration file /etc/neutron/neutron.conf
cp /etc/neutron/neutron.conf{,.bak}
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
3. Modify nova configuration
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = hf3366++

systemctl restart openstack-nova-compute.service  #restart
4. Modify /etc/neutron/plugins/ml2/linuxbridge_agent.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:enp125s0f1
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
5. Configure bridge filter
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

modprobe br_netfilter
sysctl -p
sed -i '$amodprobe br_netfilter' /etc/rc.local
6. Start the Linux bridge agent and configure it to start when the system boots
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

8, Horizon installation (control node)

1. Installation
yum -y install openstack-dashboard
2. Edit configuration
Edit the configuration file "/etc/openstack-dashboard/local_settings" and complete the following operations:
cp /etc/openstack-dashboard/local_settings{,.bak}
vim /etc/openstack-dashboard/local_settings

1.Configure the dashboard to use the OpenStack services on the controller node.
OPENSTACK_HOST = "controller"

2.Allow all hosts to access the dashboard (note the format: there is a space after the comma)
ALLOWED_HOSTS = ['*', ]

3.Configure memcached as the session storage service; comment out any other session storage configuration (note the format)
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    }
}

4.Enable Identity API Version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

5.Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

6.Configure the API versions (note the format)
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

7.Configure Default as the default domain for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

8.Configure user as the default role for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

9.Disable support for layer-3 networking services
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}
10.Configure time zone
TIME_ZONE = "Asia/Shanghai"
3. Edit the configuration file "/ etc/httpd/conf.d/openstack-dashboard.conf"

Add the following:

vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}

#Also add the line: WSGISocketPrefix run/wsgi

4. Grant ownership of the "/usr/share/openstack-dashboard/" directory to the apache user and group

chown -R apache:apache /usr/share/openstack-dashboard/

5. Restart the Web server and session storage service

systemctl restart httpd.service memcached.service
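
#With the default WEBROOT of the RDO dashboard package, the web UI should now be reachable in a browser (log in with the Default domain and the admin credentials from admin-openrc):
http://192.168.16.81/dashboard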

9, Create virtual machine

1. Create a network
openstack network create --share --external --provider-physical-network provider --provider-network-type flat enp125s0f1
2. Create a subnet
openstack subnet create --network provider --allocation-pool start=192.167.0.3,end=192.167.0.250 --dns-nameserver 114.114.114.114 --gateway 192.167.0.1 --subnet-range 192.167.0.0/19 subnet1
3. View network and subnet information
neutron net-list
neutron subnet-list
4. Create instance type
openstack flavor create --vcpus 1 --ram 64 --disk 1 m1.nano
5. Create a security group
Create it in the dashboard.
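The instance itself is created from the dashboard in this walkthrough; an equivalent CLI sketch, assuming the flat network created above (named enp125s0f1 here), the m1.nano flavor, the cirros image and a security group named default:

openstack server create --flavor m1.nano --image cirros --network enp125s0f1 --security-group default vm01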
6. View instances
After the instance has been created successfully, view it:
openstack server list

10, Cinder installation (control node)

1. Library creation authorization
mysql -u root -p
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
2. Create roles and users
1.Create the cinder user
openstack user create --domain default --password hf3366++ cinder
2.Add the admin role to the cinder user
openstack role add --project service --user cinder admin
3.Create the cinderv2 and cinderv3 service entities
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
4.Create the Block Storage service API endpoints
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
3. Installation configuration
  • install
yum -y install openstack-cinder
  • Edit the configuration file "/etc/cinder/cinder.conf"
cp /etc/cinder/cinder.conf{,.bak}
vim /etc/cinder/cinder.conf

[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
my_ip = 192.168.16.81
enabled_backends = ceph
[database]
connection = mysql+pymysql://cinder:hf3366++@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
4. Populate the database
su -s /bin/sh -c "cinder-manage db sync" cinder
#Any warning messages can be ignored
mysql cinder -e "show tables;" -u cinder -p #query
5. Configure Compute to use Block Storage (control node)
vim /etc/nova/nova.conf
#Add the following contents
[cinder]
os_region_name = RegionOne
6. Restart (control node)
1.Restart the Compute API service
systemctl restart openstack-nova-api.service
2.Start the Block Storage services and configure them to start when the system boots
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service openstack-cinder-backup.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service  openstack-cinder-backup.service
3.Verify the installation on the control node; a State of "up" is normal
openstack volume service list
#cinder-volume shows "down" because the ceph services have not yet been enabled and integrated with cinder-volume
7. Create type
cinder type-list
cinder type-create ceph
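
#A common follow-up (not shown in the original) is to bind the new type to the backend name that will later be configured as volume_backend_name = ceph, so that volumes of this type are scheduled onto the ceph backend:
cinder type-key ceph set volume_backend_name=ceph
cinder extra-specs-list   #verify the extra spec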

11, Cinder installation (compute node)

1. Installation
yum -y install openstack-cinder targetcli python-keystone
2. Modify the configuration
cp /etc/cinder/cinder.conf{,.bak}
vim /etc/cinder/cinder.conf

[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
auth_strategy = keystone
my_ip = 192.168.16.82
enabled_backends = ceph
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:hf3366++@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = hf3366++
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
3. Modify the other compute node
cp /etc/cinder/cinder.conf{,.bak}
scp /etc/cinder/cinder.conf compute02:/etc/cinder/
#On compute02, edit the copied file and change only:
vim /etc/cinder/cinder.conf
my_ip = 192.168.16.83
4. Start
systemctl enable openstack-cinder-volume.service target.service openstack-cinder-backup.service
systemctl start openstack-cinder-volume.service target.service openstack-cinder-backup.service

#Restart the services on the control node
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
5. Verification
openstack volume service list

# At this time, the back-end storage service is ceph, but the related services of ceph have not been enabled and integrated into the cinder volume, resulting in the status of the cinder volume service being "down"

12, Ceph installation

1. Configure source (all nodes)

Configure this on the OpenStack controller and compute nodes

vim /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-nautilus/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
2. Update source
 yum clean all && yum makecache
3. Install the automatic deployment tool (control node)
yum install ceph-deploy python-setuptools
ceph-deploy --version
4. Install the ceph monitor (control node)
mkdir /ceph-deploy && cd /ceph-deploy
ceph-deploy new controller --public-network 192.168.16.0/24 --cluster-network 192.168.16.0/24

--public-network: used by clients to access the ceph cluster from outside.
--cluster-network: used for data synchronization (replication) between OSDs.

#verification
ll
-rw-r--r--. 1 root root  268 12 February 10:54 ceph.conf
-rw-r--r--. 1 root root 3090 12 February 10:54 ceph-deploy-ceph.log
-rw-------. 1 root root   73 12 February 10:54 ceph.mon.keyring

cat ceph.conf
[global]
fsid = 554ef778-c7fd-4c7f-a1e3-fb9e7fb4afd5
public_network = 192.168.16.0/24
cluster_network = 192.168.16.0/24
mon_initial_members = controller
mon_host = 192.168.16.81
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

vim ceph.conf
#Under the global field, add:
mon clock drift allowed = 2
mon clock drift warn backoff = 30
5. Install CEPH (all nodes)
# This installs ceph, ceph-mgr, ceph-mon, ceph-radosgw and ceph-mds
yum -y install ceph
6. Deploy cluster environment (control node)
1.Push the configuration file (default location /etc/ceph)
ceph-deploy --overwrite-conf config push controller compute01 compute02

2.Create the mon, specifying controller as the monitor node
ceph-deploy --overwrite-conf mon create controller

3.mon initialization
# The mon related configuration will be automatically configured after initialization
ceph-deploy mon create-initial
ls -l *.keyring #Key file for initialization

-rw-------. 1 root root 113 12 February 11:28 ceph.bootstrap-mds.keyring
-rw-------. 1 root root 113 12 February 11:28 ceph.bootstrap-mgr.keyring
-rw-------. 1 root root 113 12 February 11:28 ceph.bootstrap-osd.keyring
-rw-------. 1 root root 113 12 February 11:28 ceph.bootstrap-rgw.keyring
-rw-------. 1 root root 151 12 February 11:28 ceph.client.admin.keyring
-rw-------. 1 root root  73 12 February 10:54 ceph.mon.keyring

 
4.Configure mon high availability
ceph-deploy mon add compute01 --address 192.168.16.82 
ceph-deploy mon add compute02 --address 192.168.16.83
 
5.Copy the configuration information to each node
ceph-deploy --overwrite-conf config push controller compute01 compute02
ceph-deploy admin controller compute01 compute02
 
6.Query the mon status
ceph -s 
 mon: 3 daemons, quorum controller,compute01,compute02 (age 2m)
 
7.View quorum status
ceph quorum_status --format json-pretty    #check the "quorum_names" field to see which nodes are in the quorum

8.Install the ceph OSDs
ceph-deploy disk zap controller /dev/sdb
ceph-deploy osd create controller --data /dev/sdb

#compute01 
ceph-deploy disk zap compute01 /dev/sdb
ceph-deploy osd create compute01 --data /dev/sdb

ceph-deploy disk zap compute01 /dev/sdc
ceph-deploy osd create compute01 --data /dev/sdc

ceph-deploy disk zap compute01 /dev/sdd
ceph-deploy osd create compute01 --data /dev/sdd

#compute02
ceph-deploy disk zap compute02 /dev/sdb
ceph-deploy osd create compute02 --data /dev/sdb

ceph-deploy disk zap compute02 /dev/sdc
ceph-deploy osd create compute02 --data /dev/sdc

ceph-deploy disk zap compute02 /dev/sdd
ceph-deploy osd create compute02 --data /dev/sdd

7.Deploy mgr high availability
ceph-deploy --overwrite-conf mgr create controller compute01 compute02
#Since the Luminous release, mgr must be running, otherwise ceph -s reports "no active mgr". The official documentation recommends running mgr on each monitor node

8.Check the OSD status
ceph osd tree
# All STATUS must be up. If it is down, there is a problem with ceph

9.Check the monitor status
ceph mon stat
e3: 3 mons at {compute01=[v2:192.168.16.82:3300/0,v1:192.168.16.82:6789/0],compute02=[v2:192.168.16.83:3300/0,v1:192.168.16.83:6789/0],controller=[v2:192.168.16.81:3300/0,v1:192.168.16.81:6789/0]}, election epoch 22, leader 0 controller, quorum 0,1,2 controller,compute01,compute02

13, Integrating with Ceph

Preconditions
1. Create storage pool
ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create backups 128
ceph osd pool create vms 128

ceph osd pool ls #see
2. Add a secret key to libvirt
cd /etc/ceph/

UUID=$(uuidgen)
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${UUID}</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

scp /etc/ceph/secret.xml  compute01:/etc/ceph/
scp /etc/ceph/secret.xml  compute02:/etc/ceph/
Glance integration with the ceph cluster
  • Note: delete any existing instances and images before integrating with ceph; otherwise they cannot be deleted after the integration is completed.

  • Goal: images are stored on ceph

1. Tag the pool with the rbd application (for easier management)
ceph osd pool application enable images rbd
2. Modify the configuration file
vim /etc/glance/glance-api.conf

[DEFAULT]
show_image_direct_url = True
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

[paste_deploy]
flavor = keystone
3. Create a key ring
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
4. Set permissions on the keyring
chgrp glance /etc/ceph/ceph.client.glance.keyring 
chmod 0640 /etc/ceph/ceph.client.glance.keyring
5. Add the keyring to the ceph configuration
vim ceph.conf
[client.glance]
keyring = /etc/ceph/ceph.client.glance.keyring
#Add the key ring file to the ceph configuration on the control node and put it directly at the bottom of the configuration file

cd /ceph-deploy
ceph-deploy --overwrite-conf config push controller compute01 compute02
#Send to other nodes
6. Restart
systemctl restart openstack-glance-api.service
7. Test
1.Upload image
openstack image create "cirros" --file cirros-0.4.0-aarch64-disk.img --disk-format qcow2 --container-format bare --public
2.see
glance image-list
3.Cluster view
rbd ls images
rbd -p images info  520be008-c4f4-418f-9176-9b0e03d20b72
4.View snapshot
rbd snap list images/520be008-c4f4-418f-9176-9b0e03d20b72
5.View snapshot details (protected status)
rbd info images/520be008-c4f4-418f-9176-9b0e03d20b72@snap
Cinder integration with the ceph cluster
1. Tag the pool with the rbd application (for easier management)
ceph osd pool application enable volumes rbd
ceph osd pool application get volumes
2. Create a secret key ring
ceph auth get-or-create client.volumes mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.volumes.keyring
3. Distribute the secret key ring

Copy the key and configuration file to the compute01 and compute02 calculation nodes

scp /etc/ceph/ceph.client.volumes.keyring compute01:/etc/ceph/ 
scp /etc/ceph/ceph.client.volumes.keyring compute02:/etc/ceph/
4. Distribute key
cd /etc/ceph/
ceph auth get-key client.volumes |ssh controller tee /etc/ceph/client.volumes.key
ceph auth get-key client.volumes |ssh compute01 tee /etc/ceph/client.volumes.key
ceph auth get-key client.volumes |ssh compute02 tee /etc/ceph/client.volumes.key
5. Set key ring access permission (all nodes)
chgrp cinder /etc/ceph/ceph.client.volumes.keyring 
chmod 0640 /etc/ceph/ceph.client.volumes.keyring
6. Modify the configuration file (all nodes)
#Add the keyring to the ceph configuration file on all nodes. If it has already been added, there is no need to add it again
vim /etc/ceph/ceph.conf
[client.volumes]
keyring = /etc/ceph/ceph.client.volumes.keyring

scp /etc/ceph/ceph.conf controller:/etc/ceph/
scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
7. Set uuid (all nodes)
vim /etc/ceph/cinder.uuid.txt
ae3cf04d-11ce-4983-a601-c2c5bd19bf6d

scp /etc/ceph/cinder.uuid.txt compute01:/etc/ceph/
scp /etc/ceph/cinder.uuid.txt compute02:/etc/ceph/
8. Create a secret in virsh so that kvm can access the volumes in the ceph pool
vim /etc/ceph/cinder.xml
<secret ephemeral='no' private='no'>
  <uuid>ae3cf04d-11ce-4983-a601-c2c5bd19bf6d</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>

scp /etc/ceph/cinder.xml compute01:/etc/ceph/
scp /etc/ceph/cinder.xml compute02:/etc/ceph/
9. Register cinder.xml with virsh (all nodes)
virsh secret-define --file  /etc/ceph/cinder.xml
10. Set the secret value (all nodes)
virsh secret-set-value --secret ae3cf04d-11ce-4983-a601-c2c5bd19bf6d --base64 $(cat /etc/ceph/client.volumes.key)
11. Modify the configuration file (all nodes)
vim /etc/cinder/cinder.conf 
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = volumes
rbd_secret_uuid = ae3cf04d-11ce-4983-a601-c2c5bd19bf6d
12. Restart
systemctl restart openstack-cinder-volume.service 
tail -f /var/log/cinder/volume.log
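
#Once cinder-volume reports "up", the integration can be checked end to end by creating a small test volume of the ceph type and looking for the matching RBD image (the volume name and size here are arbitrary examples):
openstack volume create --size 1 --type ceph testvol
rbd ls volumes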
Cinder backup integration with the ceph cluster
1. Tag the pool with the rbd application (for easier management)
ceph osd pool application enable backups rbd
2. Create a secret key ring
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring
3. Distribute the secret key ring
scp /etc/ceph/ceph.client.cinder-backup.keyring compute01:/etc/ceph/ 
scp /etc/ceph/ceph.client.cinder-backup.keyring compute02:/etc/ceph/
4. Distribute key
cd /etc/ceph/
ceph auth get-key client.cinder-backup |ssh controller tee /etc/ceph/client.cinder-backup.key
ceph auth get-key client.cinder-backup |ssh compute01 tee /etc/ceph/client.cinder-backup.key
ceph auth get-key client.cinder-backup |ssh compute02 tee /etc/ceph/client.cinder-backup.key
5. Set key ring access permission (all nodes)
chgrp cinder /etc/ceph/ceph.client.cinder-backup.keyring
chmod 0640 /etc/ceph/ceph.client.cinder-backup.keyring
6. Add the secret key ring to the configuration file (all nodes)
vim /etc/ceph/ceph.conf
[client.cinder-backup]
keyring = /etc/ceph/ceph.client.cinder-backup.keyring

scp /etc/ceph/ceph.conf controller:/etc/ceph/
scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
7. Set uuid
1.generate uuid
uuidgen
88e687c2-7847-464b-9703-dc19062480db

2.create a file
vim /etc/ceph/cinder-backup.uuid.txt
88e687c2-7847-464b-9703-dc19062480db
8. Create secret key
vim /etc/ceph/cinder-backup.xml
<secret ephemeral='no' private='no'>
  <uuid>88e687c2-7847-464b-9703-dc19062480db</uuid>
  <usage type='ceph'>
    <name>cinder-backup secret</name>
  </usage>
</secret>

scp /etc/ceph/cinder-backup.xml compute01:/etc/ceph/
scp /etc/ceph/cinder-backup.xml compute02:/etc/ceph/
9. Register cinder-backup.xml with virsh (all nodes)
virsh secret-define --file /etc/ceph/cinder-backup.xml
10. Set the secret value (all nodes)
virsh secret-set-value --secret 88e687c2-7847-464b-9703-dc19062480db --base64 $(cat /etc/ceph/client.cinder-backup.key)
11. Update configuration file (all nodes)
vim /etc/cinder/cinder.conf
[DEFAULT]
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 4194304
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
rbd_user = cinder-backup
rbd_secret_uuid = 88e687c2-7847-464b-9703-dc19062480db
12. Restart
systemctl restart openstack-cinder-backup.service
Nova integration with ceph (run virtual machines on ceph)
1. Tag the pool with the rbd application (for easier management)
ceph osd pool application enable vms rbd
ceph osd pool application get vms
2. Create a secret key ring
ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.nova.keyring
3. Distribute the secret key ring

Create the keyring for nova (run on the ceph node) and copy the key to the compute nodes

cd /ceph-deploy
scp /etc/ceph/ceph.client.nova.keyring compute01:/etc/ceph/
scp /etc/ceph/ceph.client.nova.keyring compute02:/etc/ceph/
4. Distribute key
ceph auth get-key client.nova |ssh controller tee /etc/ceph/client.nova.key
ceph auth get-key client.nova |ssh compute01 tee /etc/ceph/client.nova.key
ceph auth get-key client.nova |ssh compute02 tee /etc/ceph/client.nova.key
5. Set key ring access permission (all nodes)
chgrp nova /etc/ceph/ceph.client.nova.keyring 
chmod 0640 /etc/ceph/ceph.client.nova.keyring
6. Modify the configuration file
vim /etc/ceph/ceph.conf 
[client.nova]
keyring = /etc/ceph/ceph.client.nova.keyring

scp /etc/ceph/ceph.conf compute01:/etc/ceph/
scp /etc/ceph/ceph.conf compute02:/etc/ceph/
7. Set uuid
1.generate uuid
uuidgen
22809d1d-e5e7-4256-b615-4510f221ddba
2.create a file
vim /etc/ceph/nova.uuid.txt
22809d1d-e5e7-4256-b615-4510f221ddba

scp /etc/ceph/nova.uuid.txt compute01:/etc/ceph/
scp /etc/ceph/nova.uuid.txt compute02:/etc/ceph/
8. Create key
vim /etc/ceph/nova.xml
<secret ephemeral='no' private='no'>
  <uuid>22809d1d-e5e7-4256-b615-4510f221ddba</uuid>
  <usage type='ceph'>
    <name>client.nova secret</name>
  </usage>
</secret>

scp /etc/ceph/nova.xml compute01:/etc/ceph/
scp /etc/ceph/nova.xml compute02:/etc/ceph/
9. Register the secret and set its value (all nodes)
virsh secret-define --file /etc/ceph/nova.xml

virsh secret-set-value --secret 22809d1d-e5e7-4256-b615-4510f221ddba --base64 $(cat /etc/ceph/client.nova.key)
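
#Optionally confirm that the secret is registered on each node:
virsh secret-list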
10. Update nova configuration file (all compute nodes)
vim /etc/nova/nova.conf
[DEFAULT]
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"

[libvirt]
virt_type = kvm
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = nova
rbd_secret_uuid = 22809d1d-e5e7-4256-b615-4510f221ddba
11. Restart (compute node)
 systemctl restart openstack-nova-compute
12. Create an instance and verify it
rbd -p vms ls
#Compare the output with what is shown in the dashboard

14, Heat installation (control node)

1. Library creation authorization
mysql -u root -p
CREATE DATABASE heat;
GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY 'hf3366++';
GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY 'hf3366++';
FLUSH PRIVILEGES;
quit
2. Create roles and users
1.Create the heat user
openstack user create --domain default --password hf3366++ heat
2.Add the admin role to the heat user
openstack role add --project service --user heat admin
3.Create the heat and heat-cfn service entities
openstack service create --name heat --description "Orchestration" orchestration
openstack service create --name heat-cfn --description "Orchestration" cloudformation
4.Create the Orchestration service API endpoints
openstack endpoint create --region RegionOne orchestration public http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration internal http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne orchestration admin http://controller:8004/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne cloudformation public http://controller:8000/v1
openstack endpoint create --region RegionOne cloudformation internal http://controller:8000/v1
openstack endpoint create --region RegionOne cloudformation admin http://controller:8000/v1
3. Add stack management
#Orchestration requires additional information in the Identity Service to manage the stack.
1.Create the heat domain that contains stack projects and users
openstack domain create --description "Stack projects and users" heat
2.Create the heat_domain_admin user to manage projects and users in the heat domain, and set its password
openstack user create --domain heat --password hf3366++ heat_domain_admin
3.Add the admin role to the heat_domain_admin user in the heat domain to grant it administrative stack management rights
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
4.Create a regular project demo and a regular user demo in the heat domain
openstack project create --domain heat --description "Demo Project" demo
openstack user create --domain heat --password hf3366++ demo
5.Create the heat_stack_owner role
openstack role create heat_stack_owner
6.Add the heat_stack_owner role to the demo project and user to enable stack management by the demo user
openstack role add --project demo --user demo heat_stack_owner
7.Create the heat_stack_user role
openstack role create heat_stack_user
4. Installation configuration
1.Install package
yum -y install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine
2.Modify "/etc/rabbitmq/rabbitmq.config"
vim /etc/rabbitmq/rabbitmq.config
{delegate_count, 96}
3.Edit the "/etc/heat/heat.conf" file and complete the following configuration:
cp /etc/heat/heat.conf{,.bak}
vim /etc/heat/heat.conf
[DEFAULT]
transport_url = rabbit://openstack:hf3366++@controller
heat_metadata_server_url = http://controller:8000
heat_waitcondition_server_url = http://controller:8000/v1/waitcondition
stack_domain_admin = heat_domain_admin
stack_domain_admin_password = hf3366++
stack_user_domain_name = heat
num_engine_workers = 4
[heat_api]
workers = 4
[database]
connection = mysql+pymysql://heat:hf3366++@controller/heat
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = heat
password = hf3366++
[trustee]
auth_type = password
auth_url = http://controller:5000
username = heat
password = hf3366++
user_domain_name = default
[clients_keystone]
auth_uri = http://controller:5000
4. Populate the database
su -s /bin/sh -c "heat-manage db_sync" heat
5. Start
systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
systemctl start openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
6. Verification operation
openstack orchestration service list
7. Install the dashboard UI plugin
yum install -y openstack-heat-ui
systemctl restart openstack-heat*
systemctl restart httpd

#The Heat panels are now displayed in the dashboard

Topics: Linux OpenStack