Install and use MySQL highly available MGR in Docker (multiple hosts write at the same time)

Posted by ozfred on Fri, 24 Dec 2021 01:05:30 +0100

1. Create 3 MySQL environments  2. Modify MySQL parameters  3. Restart the MySQL environment  4. Install the MGR plug-in (all nodes)  5. Set the replication account (all nodes)  6. Start MGR in single-primary mode — 6.1 Bootstrap MGR on the primary (172.72.0.15); 6.2 Join the other nodes (172.72.0.16, 172.72.0.17)  7. Switching between multi-primary and single-primary mode — 7.1 Query the current mode; 7.2 Switching via functions (7.2.1 single→multi, 7.2.2 multi→single); 7.3 Manual switching (7.3.1 single→multi, 7.3.2 multi→single)  8. Test synchronization  9. Adding a new MGR node — 9.1 Create the new MySQL node; 9.2 Install the MGR plug-in on the new node; 9.3 Set the replication account on the new node; 9.4 Adjust parameters on the original 3 nodes; 9.5 Join the new node; 9.6 View all nodes  10. Reset the MGR configuration

1, Create 3 MySQL environments

 # Pull the MySQL image
 docker pull mysql:8.0.20

 # Create a dedicated bridge network with a fixed subnet for the MGR members
 docker network create --subnet=172.72.0.0/24 mysql-network

 # Create the config and data directories for each of the three nodes
 for node in lhrmgr15 lhrmgr16 lhrmgr17; do
   mkdir -p "/usr/local/mysql/$node/conf.d" "/usr/local/mysql/$node/data"
 done

# Launch the three MySQL containers on the private network. Each node differs
# only in container name, hostname, published port, and fixed IP; volumes,
# root password, timezone, and image are identical.
while read -r name host port ip; do
  docker run -d --name "$name" \
    -h "$host" -p "$port":3306 --net=mysql-network --ip "$ip" \
    -v "/usr/local/mysql/$host/conf.d":/etc/mysql/conf.d \
    -v "/usr/local/mysql/$host/data":/var/lib/mysql/ \
    -e MYSQL_ROOT_PASSWORD=lhr \
    -e TZ=Asia/Shanghai \
    mysql:8.0.20
done <<'SPECS'
mysql8020mgr33065 lhrmgr15 33065 172.72.0.15
mysql8020mgr33066 lhrmgr16 33066 172.72.0.16
mysql8020mgr33067 lhrmgr17 33067 172.72.0.17
SPECS

2, Modify MySQL parameters

  cat > /usr/local/mysql/lhrmgr15/conf.d/my.cnf <<"EOF"
  [mysqld]
  user=mysql
  port=3306
  character_set_server=utf8mb4
  secure_file_priv=''
  server-id = 802033065
  default-time-zone = '+8:00'
  log_timestamps = SYSTEM
  log-bin = 
  binlog_format=row
  binlog_checksum=NONE
  log-slave-updates=1
  skip-name-resolve
  auto-increment-increment=2
  auto-increment-offset=1
  gtid-mode=ON
  enforce-gtid-consistency=on
  default_authentication_plugin=mysql_native_password
  max_allowed_packet = 500M
 
  master_info_repository=TABLE
  relay_log_info_repository=TABLE
 relay_log=lhrmgr15-relay-bin-ip15
 
 
  transaction_write_set_extraction=XXHASH64
  loose-group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
  loose-group_replication_start_on_boot=OFF
  loose-group_replication_local_address= "172.72.0.15:33061"
  loose-group_replication_group_seeds= "172.72.0.15:33061,172.72.0.16:33062,172.72.0.17:33063"
  loose-group_replication_bootstrap_group=OFF
  loose-group_replication_ip_whitelist="172.72.0.15,172.72.0.16,172.72.0.17"
  
  report_host=172.72.0.15
  report_port=3306
 
 EOF

  cat >  /usr/local/mysql/lhrmgr16/conf.d/my.cnf <<"EOF"
  [mysqld]
  user=mysql
  port=3306
  character_set_server=utf8mb4
  secure_file_priv=''
  server-id = 802033066
  default-time-zone = '+8:00'
  log_timestamps = SYSTEM
  log-bin = 
  binlog_format=row
  binlog_checksum=NONE
  log-slave-updates=1
  gtid-mode=ON
  enforce-gtid-consistency=ON
  skip_name_resolve
  default_authentication_plugin=mysql_native_password
  max_allowed_packet = 500M
  
  master_info_repository=TABLE
  relay_log_info_repository=TABLE
  relay_log=lhrmgr16-relay-bin-ip16

  transaction_write_set_extraction=XXHASH64
  loose-group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
  loose-group_replication_local_address= "172.72.0.16:33062"
  loose-group_replication_group_seeds= "172.72.0.15:33061,172.72.0.16:33062,172.72.0.17:33063"
  loose-group_replication_bootstrap_group=OFF
  loose-group_replication_ip_whitelist="172.72.0.15,172.72.0.16,172.72.0.17"
  
  report_host=172.72.0.16
  report_port=3306
  
  EOF
 
 
  cat > /usr/local/mysql/lhrmgr17/conf.d/my.cnf <<"EOF"
  [mysqld]
  user=mysql
  port=3306
  character_set_server=utf8mb4
  secure_file_priv=''
  server-id = 802033067
  default-time-zone = '+8:00'
  log_timestamps = SYSTEM
  log-bin = 
  binlog_format=row
  binlog_checksum=NONE
  log-slave-updates=1
  gtid-mode=ON
  enforce-gtid-consistency=ON
  skip_name_resolve
  default_authentication_plugin=mysql_native_password
  max_allowed_packet = 500M

  master_info_repository=TABLE
  relay_log_info_repository=TABLE
  relay_log=lhrmgr16-relay-bin-ip16
 
 transaction_write_set_extraction=XXHASH64
 loose-group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
 loose-group_replication_start_on_boot=OFF
 loose-group_replication_local_address= "172.72.0.17:33063"
 loose-group_replication_group_seeds= "172.72.0.15:33061,172.72.0.16:33062,172.72.0.17:33063"
 loose-group_replication_bootstrap_group=OFF
 loose-group_replication_ip_whitelist="172.72.0.15,172.72.0.16,172.72.0.17"
 
 report_host=172.72.0.17
 report_port=3306
 
 EOF

3, Restart MySQL environment

  # Restart all three containers so the new my.cnf files take effect
  docker restart mysql8020mgr33065 mysql8020mgr33066 mysql8020mgr33067
  docker ps
  
  # Get a shell / mysql client inside a container
  docker exec -it mysql8020mgr33065 bash
  docker exec -it mysql8020mgr33065 mysql -uroot -plhr
 
  # Remote connection to MySQL (192.168.1.35 is the Docker host IP — adjust to yours)
 mysql -uroot -plhr -h192.168.1.35 -P33065 
 mysql -uroot -plhr -h192.168.1.35 -P33066 
 mysql -uroot -plhr -h192.168.1.35 -P33067 
 
 # Follow the log of each container
 docker logs -f --tail 10 mysql8020mgr33065
 docker logs -f --tail 10 mysql8020mgr33066
 docker logs -f --tail 10 mysql8020mgr33067
  
 # Verify each node's hostname, server_id, and server_uuid are distinct
 mysql -uroot -plhr -h192.168.1.35 -P33065 -e "select @@hostname,@@server_id,@@server_uuid"
 mysql -uroot -plhr -h192.168.1.35 -P33066 -e "select @@hostname,@@server_id,@@server_uuid"
 mysql -uroot -plhr -h192.168.1.35 -P33067 -e "select @@hostname,@@server_id,@@server_uuid"

result:

[root@docker35 ~]# mysql -uroot -plhr -h192.168.1.35 -P33065 -e "select @@hostname,@@server_id,@@server_uuid"
 mysql: [Warning] Using a password on the command line interface can be insecure.
 +------------+-------------+--------------------------------------+
 | @@hostname | @@server_id | @@server_uuid                        |
 +------------+-------------+--------------------------------------+
 | lhrmgr15   |   802033065 | 611717fe-d785-11ea-9342-0242ac48000f |
 +------------+-------------+--------------------------------------+
 [root@docker35 ~]# mysql -uroot -plhr -h192.168.1.35 -P33066 -e "select @@hostname,@@server_id,@@server_uuid"
 mysql: [Warning] Using a password on the command line interface can be insecure.
 +------------+-------------+--------------------------------------+
 | @@hostname | @@server_id | @@server_uuid                        |
 +------------+-------------+--------------------------------------+
| lhrmgr16   |   802033066 | 67090f47-d785-11ea-b76c-0242ac480010 |
+------------+-------------+--------------------------------------+
[root@docker35 ~]# mysql -uroot -plhr -h192.168.1.35 -P33067 -e "select @@hostname,@@server_id,@@server_uuid"
mysql: [Warning] Using a password on the command line interface can be insecure.
+------------+-------------+--------------------------------------+
| @@hostname | @@server_id | @@server_uuid                        |
+------------+-------------+--------------------------------------+
| lhrmgr17   |   802033067 | 678cf064-d785-11ea-b8ce-0242ac480011 |
+------------+-------------+--------------------------------------+
[root@docker35 ~]# 

4, Install MGR plug-in (executed by all nodes)

 MySQL [(none)]> INSTALL PLUGIN group_replication SONAME 'group_replication.so';
 Query OK, 0 rows affected (0.23 sec)
 
 MySQL [(none)]> show plugins;
 +---------------------------------+----------+--------------------+----------------------+---------+
 | Name                            | Status   | Type               | Library              | License |
 +---------------------------------+----------+--------------------+----------------------+---------+
 | group_replication               | ACTIVE   | GROUP REPLICATION  | group_replication.so | GPL     |
 +---------------------------------+----------+--------------------+----------------------+---------+
45 rows in set (0.00 sec)

5, Set replication account (executed by all nodes)

-- Create the replication account used by MGR distributed recovery.
-- SQL_LOG_BIN=0 keeps these statements out of the binlog so each node can
-- create the user locally without generating conflicting GTIDs.
SET SQL_LOG_BIN=0;
CREATE USER repl@'%' IDENTIFIED BY 'lhr';
GRANT REPLICATION SLAVE ON *.* TO repl@'%';
FLUSH PRIVILEGES;
SET SQL_LOG_BIN=1;
-- Point the group_replication_recovery channel at that account.
CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='lhr' FOR CHANNEL 'group_replication_recovery';

Execution process:

MySQL [(none)]> SET SQL_LOG_BIN=0;
Query OK, 0 rows affected (0.00 sec)
 
MySQL [(none)]> CREATE USER repl@'%' IDENTIFIED BY 'lhr';
Query OK, 0 rows affected (0.01 sec)
 
MySQL [(none)]> GRANT REPLICATION SLAVE ON *.* TO repl@'%';
Query OK, 0 rows affected (0.00 sec)
 
MySQL [(none)]> FLUSH PRIVILEGES;
Query OK, 0 rows affected (0.00 sec)

MySQL [(none)]> SET SQL_LOG_BIN=1;
Query OK, 0 rows affected (0.00 sec)

MySQL [(none)]> CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='lhr' FOR CHANNEL 'group_replication_recovery';
Query OK, 0 rows affected, 1 warning (0.04 sec)

6, Start MGR single main mode

6.1 start MGR and execute it on the main library (172.72.0.15)

-- Bootstrap the group from this FIRST node only; switch bootstrap_group OFF
-- again immediately, otherwise a later restart would create a second group.
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;
-- View MGR group membership
SELECT * FROM performance_schema.replication_group_members;

Execution process:

 MySQL [(none)]> SET GLOBAL group_replication_bootstrap_group=ON;
 Query OK, 0 rows affected (0.00 sec)
 
 MySQL [(none)]> START GROUP_REPLICATION;
 Query OK, 0 rows affected (3.49 sec)
 
 MySQL [(none)]> SET GLOBAL group_replication_bootstrap_group=OFF;
 Query OK, 0 rows affected (0.00 sec)
 
 MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
1 row in set (0.01 sec)

6.2 Add the other nodes to MGR — execute on the secondaries (172.72.0.16, 172.72.0.17)

-- Join the already-bootstrapped group (no bootstrap flag on secondaries)
START GROUP_REPLICATION;
-- View MGR group membership
SELECT * FROM performance_schema.replication_group_members;

Execution results:

MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
| group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.01 sec)

You can see that all three nodes are ONLINE and the primary node is 172.72.0.15. Only the primary node can write; the other nodes are read-only. The MGR single-primary mode has been set up successfully.

7, Multi master and single master mode switching

7.1. Query current mode

 MySQL [(none)]>  show variables like '%group_replication_single_primary_mode%';
 +---------------------------------------+-------+
 | Variable_name                         | Value |
 +---------------------------------------+-------+
 | group_replication_single_primary_mode | ON    |
 +---------------------------------------+-------+
 1 row in set (0.01 sec)
 
 MySQL [(none)]> SELECT @@group_replication_single_primary_mode;
 +-----------------------------------------+
 | @@group_replication_single_primary_mode |
 +-----------------------------------------+
 |                                       1 |
 +-----------------------------------------+
 1 row in set (0.00 sec)

Parameter group_replication_single_primary_mode is ON, indicating single master mode.

7.2. The function realizes multi master and single master switching

Function switching: starting with MySQL 8.0.13, the MGR mode can be changed online by calling a built-in function.

-- Switch from single-primary to multi-primary mode
SELECT group_replication_switch_to_multi_primary_mode();

-- Switch from multi-primary to single-primary mode. The argument is the
-- server_uuid of the member that should become PRIMARY. Use the variable
-- UNQUOTED (quoting it would pass the literal string '@@server_uuid');
-- @@server_uuid promotes the member you are connected to, or pass an
-- explicit UUID such as '67090f47-d785-11ea-b76c-0242ac480010'.
SELECT group_replication_switch_to_single_primary_mode(@@server_uuid);

-- View group information
SELECT * FROM performance_schema.replication_group_members;

7.2. 1. Single main switch multi main mode

 MySQL [(none)]> SELECT @@group_replication_single_primary_mode;
 +-----------------------------------------+
 | @@group_replication_single_primary_mode |
 +-----------------------------------------+
 |                                       1 |
 +-----------------------------------------+
 1 row in set (0.00 sec)
 
 MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
| group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)

MySQL [(none)]> select group_replication_switch_to_multi_primary_mode();
+--------------------------------------------------+
| group_replication_switch_to_multi_primary_mode() |
+--------------------------------------------------+
| Mode switched to multi-primary successfully.     |
+--------------------------------------------------+
1 row in set (1.01 sec)

MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)
MySQL [(none)]> SELECT @@group_replication_single_primary_mode;
+-----------------------------------------+
| @@group_replication_single_primary_mode |
+-----------------------------------------+
|                                       0 |
+-----------------------------------------+

7.2. 2. Multi main switch single main mode

 MySQL [(none)]> SELECT @@group_replication_single_primary_mode;
 +-----------------------------------------+
 | @@group_replication_single_primary_mode |
 +-----------------------------------------+
 |                                       0 |
 +-----------------------------------------+
 1 row in set (0.00 sec)
 
 MySQL [(none)]> select group_replication_switch_to_single_primary_mode('67090f47-d785-11ea-b76c-0242ac480010') ;
 +-----------------------------------------------------------------------------------------+
 | group_replication_switch_to_single_primary_mode('67090f47-d785-11ea-b76c-0242ac480010') |
 +-----------------------------------------------------------------------------------------+
 | Mode switched to single-primary successfully.                                           |
 +-----------------------------------------------------------------------------------------+
 1 row in set (1.02 sec)
 
 MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
 | group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
 | group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 3 rows in set (0.00 sec)
 
 MySQL [(none)]> SELECT @@group_replication_single_primary_mode;
 +-----------------------------------------+
 | @@group_replication_single_primary_mode |
 +-----------------------------------------+
 |                                       1 |
 +-----------------------------------------+
 1 row in set (0.00 sec)

7.3 manual switching

To switch the MGR mode, you need to restart group replication, because you need to turn off group replication on all nodes and set group_replication_single_primary_mode=OFF and other parameters, and then start group replication.

7.3. 1. Single main switch multi main mode

1. Stop group replication (performed by all nodes):

-- On ALL nodes: stop GR, then flip both mode flags to their multi-primary
-- values (they must be consistent across every member before restarting).
stop group_replication;
set global group_replication_single_primary_mode=OFF;
set global group_replication_enforce_update_everywhere_checks=ON;

2. Select any node to execute

SET GLOBAL group_replication_bootstrap_group=ON; 
START GROUP_REPLICATION; 
SET GLOBAL group_replication_bootstrap_group=OFF;

3. Other node execution

START GROUP_REPLICATION; 

4. View the group information — the MEMBER_ROLE of every node is now PRIMARY

MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)

It can be seen that all node states are online, roles are PRIMARY, and MGR multi master mode switching is successful.

7.3. 2. Multi main switch single main mode

1. All nodes execute

-- On ALL nodes: stop GR, then flip both mode flags back to their
-- single-primary values (checks OFF before mode ON).
stop group_replication;
set global group_replication_enforce_update_everywhere_checks=OFF;
set global group_replication_single_primary_mode=ON;

2. The master node (172.72.0.16) executes

SET GLOBAL group_replication_bootstrap_group=ON; 
START GROUP_REPLICATION; 
SET GLOBAL group_replication_bootstrap_group=OFF;

3. From nodes (172.72.0.15, 172.72.0.17)

START GROUP_REPLICATION; 

4. View MGR group information

MySQL [(none)]> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
| group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
| group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
| group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
3 rows in set (0.00 sec)

8, Test synchronization

Execute the following command on the master node, and then query on other nodes:

 -- Create a test database/table on the primary; MGR requires every table
 -- to have a primary key.
 create database lhrdb;
 CREATE TABLE lhrdb.`tb1` (
  -- int display width (e.g. int(11)) is deprecated since MySQL 8.0.17
  `id` int NOT NULL AUTO_INCREMENT,
  `hostname` varchar(100) DEFAULT NULL,
  `server_id` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
 -- Insert on the primary, then run the SELECT on every node to verify sync
insert into lhrdb.tb1(hostname,server_id) select @@hostname,@@server_id;
select * from lhrdb.tb1;

-- The query returns the same row on all 3 nodes
MySQL [(none)]> select * from lhrdb.tb1;
+----+----------+-----------+
| id | hostname | server_id |
+----+----------+-----------+
|  1 | lhrmgr16 | 802033066 |
+----+----------+-----------+
1 row in set (0.02 sec)

9, MGR new node

9.1. Create a new MySQL node

 # Directories for the 4th node (lhrmgr18 / 172.72.0.18)
 mkdir -p /usr/local/mysql/lhrmgr18/conf.d
 mkdir -p /usr/local/mysql/lhrmgr18/data
 
 # Same pattern as the first three nodes; restarted later once my.cnf is written
 docker run -d --name mysql8020mgr33068 \
   -h lhrmgr18 -p 33068:3306 --net=mysql-network --ip 172.72.0.18 \
   -v /usr/local/mysql/lhrmgr18/conf.d:/etc/mysql/conf.d -v /usr/local/mysql/lhrmgr18/data:/var/lib/mysql/ \
   -e MYSQL_ROOT_PASSWORD=lhr \
   -e TZ=Asia/Shanghai \
    mysql:8.0.20

# my.cnf for the new node 4 (lhrmgr18 / 172.72.0.18, server-id 802033068).
# Seeds and whitelist now list all FOUR members; GR listens on 172.72.0.18:33064.
# "EOF" is quoted, so the body is written to the file verbatim.
cat > /usr/local/mysql/lhrmgr18/conf.d/my.cnf <<"EOF"
[mysqld]
user=mysql
port=3306
character_set_server=utf8mb4
secure_file_priv=''
server-id = 802033068
log-bin = 
binlog_format=row
binlog_checksum=NONE
log-slave-updates=1
skip-name-resolve
auto-increment-increment=2
auto-increment-offset=1
gtid-mode=ON
enforce-gtid-consistency=on
default_authentication_plugin=mysql_native_password
max_allowed_packet = 500M
log_slave_updates=on

master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log=lhrmgr18-relay-bin-ip18

transaction_write_set_extraction=XXHASH64
loose-group_replication_group_name="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
loose-group_replication_start_on_boot=OFF
loose-group_replication_local_address= "172.72.0.18:33064"
loose-group_replication_group_seeds= "172.72.0.15:33061,172.72.0.16:33062,172.72.0.17:33063,172.72.0.18:33064"
loose-group_replication_bootstrap_group=OFF
loose-group_replication_ip_whitelist="172.72.0.15,172.72.0.16,172.72.0.17,172.72.0.18"
report_host=172.72.0.18
report_port=3306

EOF

# Restart the new container so it picks up the my.cnf written above
docker restart mysql8020mgr33068

# Sanity checks: containers up, identities distinct, connections work, logs clean
docker ps
mysql -uroot -plhr -h192.168.1.35 -P33065 -e "select @@hostname,@@server_id,@@server_uuid"
mysql -uroot -plhr -h192.168.1.35 -P33066 -e "select @@hostname,@@server_id,@@server_uuid"
mysql -uroot -plhr -h192.168.1.35 -P33067 -e "select @@hostname,@@server_id,@@server_uuid"
mysql -uroot -plhr -h192.168.1.35 -P33068 -e "select @@hostname,@@server_id,@@server_uuid"
mysql -uroot -plhr -h192.168.1.35 -P33065 
mysql -uroot -plhr -h192.168.1.35 -P33066 
mysql -uroot -plhr -h192.168.1.35 -P33067 
mysql -uroot -plhr -h192.168.1.35 -P33068 
docker logs -f --tail 10 mysql8020mgr33065
docker logs -f --tail 10 mysql8020mgr33066
docker logs -f --tail 10 mysql8020mgr33067
docker logs -f --tail 10 mysql8020mgr33068

9.2 install MGR plug-in at new node

-- Install the MGR plug-in (run on the NEW node only)
INSTALL PLUGIN group_replication SONAME 'group_replication.so';
show plugins;

9.3. Set a copy account in the new node

 -- Create the replication (recovery) account on the NEW node, outside the
 -- binlog so no conflicting GTIDs are generated.
 SET SQL_LOG_BIN=0;
 CREATE USER repl@'%' IDENTIFIED BY 'lhr';
 GRANT REPLICATION SLAVE ON *.* TO repl@'%';
 FLUSH PRIVILEGES;
 SET SQL_LOG_BIN=1;
 CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='lhr' FOR CHANNEL 'group_replication_recovery';

9.4. Modify parameters in the original 3 node

 -- On each ORIGINAL node: extend seeds/whitelist to include 172.72.0.18.
 set global group_replication_group_seeds='172.72.0.15:33061,172.72.0.16:33062,172.72.0.17:33063,172.72.0.18:33064';
 -- ip_whitelist can only be changed while group replication is stopped
 stop group_replication;
 set global group_replication_ip_whitelist="172.72.0.15,172.72.0.16,172.72.0.17,172.72.0.18";
 start group_replication;

9.5. Adding new nodes

  -- 4 Nodes need to ensure that the values of the following two parameters are consistent
 MySQL [(none)]> select @@group_replication_enforce_update_everywhere_checks,@@group_replication_single_primary_mode;
  +------------------------------------------------------+-----------------------------------------+
  | @@group_replication_enforce_update_everywhere_checks | @@group_replication_single_primary_mode |
  +------------------------------------------------------+-----------------------------------------+
  |                                                    0 |                                       1 |
  +------------------------------------------------------+-----------------------------------------+
 
  -- If the two values differ from the group's, align them on the new node first
 set global group_replication_single_primary_mode=ON;
 set global group_replication_enforce_update_everywhere_checks=OFF;
 CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='lhr' FOR CHANNEL 'group_replication_recovery';

-- Join the new node to the group
 start group_replication;

9.6. View all nodes

 MySQL [lhrdb]> SELECT * FROM performance_schema.replication_group_members;
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
 | group_replication_applier | 611717fe-d785-11ea-9342-0242ac48000f | 172.72.0.15 |        3306 | ONLINE       | PRIMARY     | 8.0.20         |
 | group_replication_applier | 67090f47-d785-11ea-b76c-0242ac480010 | 172.72.0.16 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
 | group_replication_applier | 678cf064-d785-11ea-b8ce-0242ac480011 | 172.72.0.17 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
 | group_replication_applier | e4d6bf4b-d78d-11ea-b1b6-0242ac480012 | 172.72.0.18 |        3306 | ONLINE       | SECONDARY   | 8.0.20         |
 +---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+
4 rows in set (0.31 sec)

10, Reset MGR configuration

If you need to reset, you need to execute the following command:

-- Reset this node's MGR state. RESET MASTER wipes the node's binlog and
-- GTID history, so only do this when rebuilding the node/group.
STOP GROUP_REPLICATION;
RESET MASTER;
SET SQL_LOG_BIN=1;
CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='lhr' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;

Next: MySQL deadlock release status →