1. How to configure and start a slave node when the master has already been running for some time and holds a large amount of data
#Check the binary logs on the master
[root@master ~]#mysql -uroot -p
MariaDB [(none)]> show master logs;
+--------------------+-----------+
| Log_name           | File_size |
+--------------------+-----------+
| mariadb-bin.000001 |     28052 |
| mariadb-bin.000002 |       545 |
+--------------------+-----------+
2 rows in set (0.001 sec)
#Grant the replication account
MariaDB [(none)]>grant replication slave on *.* to repluser@'10.0.0.%' identified by 'magedu';
#Take a full backup on the master
[root@master ~]#mysqldump -A -F --single-transaction --master-data=1 > /backup/fullbackup_`date +%F_%T`.sql
#Copy the backup file to the slave
[root@master ~]#scp /backup/fullbackup_2020-10-18_17\:41\:17.sql 10.0.0.18:/data/
#Install and configure the database on the slave
[root@slave ~]#dnf -y install mariadb-server
[root@slave ~]#vim /etc/my.cnf.d/mariadb-server.cnf
[mysqld]
server-id=18
read-only
[root@slave ~]#systemctl restart mariadb
#Configure the slave to start replicating from the position recorded in the full backup
[root@slave ~]#vim /data/fullbackup_2020-10-18_17\:41\:17.sql
CHANGE MASTER TO
MASTER_HOST='10.0.0.8',
MASTER_USER='repluser',
MASTER_PASSWORD='magedu',
MASTER_PORT=3306,
MASTER_LOG_FILE='mariadb-bin.000002',
MASTER_LOG_POS=545;
#Import the backup data on the slave
[root@slave ~]#mysql < /data/fullbackup_2020-10-18_17\:41\:17.sql
[root@slave ~]#mysql
#Start the slave threads
MariaDB [(none)]> start slave;
#Check the slave status
MariaDB [(none)]> show slave status\G
...omitted...
Slave_IO_Running: Yes
Slave_SQL_Running: Yes #both Yes values mean the configuration succeeded
...omitted...
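#Optional sanity check (not part of the original steps; the database name repl_test is arbitrary): create a test database on the master and confirm it replicates to the slave
[root@master ~]#mysql -uroot -p -e 'create database repl_test;'
[root@slave ~]#mysql -e 'show databases like "repl_test";'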
2. When the master server goes down, promote a slave to become the new master
#Check the binary log coordinates recorded in the relay log info and pick a suitable slave node to become the new master
[root@centos8 ~]#cat /var/lib/mysql/relay-log.info
5
./mariadb-relay-bin.000002
1180
mysql-bin.000002
996
0
#On the new master, edit the configuration file: turn off read-only and enable the binary log
[root@slave1 ~]#vim /etc/my.cnf.d/mariadb-server.cnf
[mysqld]
server-id=18
read-only=OFF
log-bin=/data/mysql/logbin/mysql-bin
#Clear the old master's replication information
MariaDB [hellodb]>set global read_only=off;
MariaDB [hellodb]>stop slave;
MariaDB [hellodb]>reset slave all;
#Analyze the old master's binary log, dump the events that were never replicated to the new master, and restore them on the new master to recover as much data as possible (see the sketch below)
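#A sketch of that step (not in the original notes): it assumes the old master's disk is still readable, that its binary logs live under /var/lib/mysql, and it uses the coordinates recorded in relay-log.info above (mysql-bin.000002, position 996); the old-master prompt is a placeholder
[root@old-master ~]#mysqlbinlog --start-position=996 /var/lib/mysql/mysql-bin.000002 > /data/unsynced.sql
[root@old-master ~]#scp /data/unsynced.sql 10.0.0.18:/data/
[root@slave1 ~]#mysql < /data/unsynced.sql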
#Take a full backup on the new master
[root@slave1 ~]#mysqldump -A --single-transaction --master-data=1 -F > backup.sql
[root@slave1 ~]#scp backup.sql 10.0.0.28:
#All other slaves restore from this backup and point to the new master
[root@slave2 ~]#vim backup.sql
CHANGE MASTER TO
MASTER_HOST='10.0.0.18',
MASTER_USER='repluser',
MASTER_PASSWORD='centos',
MASTER_PORT=3306,
MASTER_LOG_FILE='mysql-bin.000002',
MASTER_LOG_POS=371;
MariaDB [hellodb]>stop slave;
MariaDB [hellodb]>reset slave all;
MariaDB [hellodb]>set sql_log_bin=off;
MariaDB [hellodb]>source backup.sql;
MariaDB [hellodb]>set sql_log_bin=on;
MariaDB [hellodb]>start slave;
3. Build a database cluster with MHA 0.58
Environment: four hosts
10.0.0.7 CentOS7 MHA manager
10.0.0.8 CentOS8 Master
10.0.0.18 CentOS8 Slave1
10.0.0.28 CentOS8 Slave2
#Install both packages, mha4mysql-manager and mha4mysql-node, on the manager node
[root@mha-manager ~]#yum -y install mha4mysql-manager-0.58-0.el7.centos.noarch.rpm
[root@mha-manager ~]#yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
#Install the mha4mysql-node package on all MySQL servers
[root@master ~]#yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
[root@Slave1 ~]#yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
[root@Slave2 ~]#yum -y install mha4mysql-node-0.58-0.el7.centos.noarch.rpm
#Set up ssh key authentication among all nodes
[root@mha-manager ~]#ssh-keygen
[root@mha-manager ~]#ssh-copy-id 10.0.0.7
[root@mha-manager ~]#rsync -av .ssh 10.0.0.8:/root/
[root@mha-manager ~]#rsync -av .ssh 10.0.0.18:/root/
[root@mha-manager ~]#rsync -av .ssh 10.0.0.28:/root/
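#Optional (not part of the original steps): quickly verify that key-based login works from the manager to every node
[root@mha-manager ~]#for i in 10.0.0.8 10.0.0.18 10.0.0.28; do ssh $i hostname; done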
#Create the configuration file on the manager node
[root@mha-manager ~]#mkdir /etc/mastermha/
[root@mha-manager ~]#vim /etc/mastermha/app1.cnf
[server default]
user=mhauser
password=magedu
manager_workdir=/data/mastermha/app1/
manager_log=/data/mastermha/app1/manager.log
remote_workdir=/data/mastermha/app1/
ssh_user=root
repl_user=repluser
repl_password=magedu
ping_interval=1
master_ip_failover_script=/usr/local/bin/master_ip_failover
report_script=/usr/local/bin/sendmail.sh
check_repl_delay=0
master_binlog_dir=/data/mysql/
[server1]
hostname=10.0.0.8
candidate_master=1
[server2]
hostname=10.0.0.18
candidate_master=1
[server3]
hostname=10.0.0.28
#Prepare the mail alert script and the Perl script that switches the VIP (a sketch of the mail script follows)
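#A minimal sketch of the report script (not from the original notes; the recipient and message are placeholders, and the master_ip_failover Perl script is not reproduced here)
[root@mha-manager ~]#cat /usr/local/bin/sendmail.sh
#!/bin/bash
# called by MHA when a failover happens; assumes a local MTA and the mailx package are available
echo "MHA failover finished at $(date)" | mail -s "MHA Warning" root@localhost
[root@mha-manager ~]#chmod +x /usr/local/bin/sendmail.sh /usr/local/bin/master_ip_failover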
#Configure the master
[root@master ~]#mkdir /data/mysql/
[root@master ~]#chown mysql.mysql /data/mysql/
[root@master ~]#vim /etc/my.cnf
[mysqld]
server_id=1
log-bin=/data/mysql/mysql-bin
skip_name_resolve=1
#Grant the MHA management account and the replication account
mysql>grant replication slave on *.* to repluser@'10.0.0.%' identified by 'magedu';
mysql>grant all on *.* to mhauser@'10.0.0.%' identified by 'magedu';
MariaDB [(none)]> show master logs;
+--------------------+-----------+
| Log_name           | File_size |
+--------------------+-----------+
| mariadb-bin.000001 |       245 |
+--------------------+-----------+
1 row in set (0.001 sec)
#Configure the VIP
[root@master ~]#ifconfig eth0:1 10.0.0.100/24
#Configure the slaves
[root@slave ~]#mkdir /data/mysql
[root@slave ~]#chown mysql.mysql /data/mysql/
[root@slave ~]#vim /etc/my.cnf
[mysqld]
server_id=2
log-bin=/data/mysql/mysql-bin
read_only
relay_log_purge=0
skip_name_resolve=1
#Configure replication
mysql>CHANGE MASTER TO
MASTER_HOST='10.0.0.8',
MASTER_USER='repluser',
MASTER_PASSWORD='magedu',
MASTER_LOG_FILE='mariadb-bin.000001',
MASTER_LOG_POS=245;
mysql> start slave;
mysql> show slave status\G
...omitted...
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
...omitted...
#Check the MHA environment
[root@mha-manager ~]#masterha_check_ssh --conf=/etc/mastermha/app1.cnf
....
Wed Jun 17 09:59:44 2020 - [info] All SSH connection tests passed successfully.
[root@mha-manager ~]#masterha_check_repl --conf=/etc/mastermha/app1.cnf
....
MySQL Replication Health is OK.
[root@mha-manager ~]#masterha_check_status --conf=/etc/mastermha/app1.cnf
app1 is stopped(2:NOT_RUNNING).
#Start MHA
[root@mha-manager ~]#masterha_manager --conf=/etc/mastermha/app1.cnf
Wed Jun 17 10:02:58 2020 - [warning] Global configuration file /etc/masterha_default.cnf not found. Skipping.
Wed Jun 17 10:02:58 2020 - [info] Reading application default configuration from /etc/mastermha/app1.cnf..
Wed Jun 17 10:02:58 2020 - [info] Reading server configuration from /etc/mastermha/app1.cnf..
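#masterha_manager runs in the foreground; in practice it is usually started in the background and then verified (a sketch, not from the original notes; the nohup log path is an arbitrary choice). Once healthy, masterha_check_status reports the app as running with PING_OK and names the current master.
[root@mha-manager ~]#nohup masterha_manager --conf=/etc/mastermha/app1.cnf &> /data/mastermha/app1/nohup.log &
[root@mha-manager ~]#masterha_check_status --conf=/etc/mastermha/app1.cnf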
4. Hands-on case: Percona XtraDB Cluster (PXC 5.7)
Environment: three hosts
pxc1: 10.0.0.7
pxc2: 10.0.0.17
pxc3: 10.0.0.27
#Install Percona XtraDB Cluster 5.7
#Configure the YUM repository
[root@pxc1 ~]#vim /etc/yum.repos.d/pxc.repo
[percona]
name=percona_repo
baseurl = https://mirrors.tuna.tsinghua.edu.cn/percona/release/$releasever/RPMS/$basearch
enabled = 1
gpgcheck = 0
[root@pxc1 ~]#scp /etc/yum.repos.d/pxc.repo 10.0.0.17:/etc/yum.repos.d
[root@pxc1 ~]#scp /etc/yum.repos.d/pxc.repo 10.0.0.27:/etc/yum.repos.d
#Install PXC 5.7 on all three nodes
[root@pxc1 ~]#yum install Percona-XtraDB-Cluster-57 -y
[root@pxc2 ~]#yum install Percona-XtraDB-Cluster-57 -y
[root@pxc3 ~]#yum install Percona-XtraDB-Cluster-57 -y
#Configure mysql and the cluster configuration file on each node
[root@pxc1 ~]#vim /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
[mysqld]
wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 #IPs of the three nodes
wsrep_node_address=10.0.0.7 #each node specifies its own IP
wsrep_node_name=pxc-cluster-node-1 #each node specifies its own node name
wsrep_sst_auth="sstuser:s3cretPass" #uncomment this line
[root@pxc2 ~]#vim /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
[mysqld]
wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 #IPs of the three nodes
wsrep_node_address=10.0.0.17 #each node specifies its own IP
wsrep_node_name=pxc-cluster-node-2 #each node specifies its own node name
wsrep_sst_auth="sstuser:s3cretPass" #uncomment this line
[root@pxc3 ~]#vim /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
[mysqld]
wsrep_cluster_address=gcomm://10.0.0.7,10.0.0.17,10.0.0.27 #IPs of the three nodes
wsrep_node_address=10.0.0.27 #each node specifies its own IP
wsrep_node_name=pxc-cluster-node-3 #each node specifies its own node name
wsrep_sst_auth="sstuser:s3cretPass" #uncomment this line
#Bootstrap the first node of the PXC cluster (any node will do)
[root@pxc1 ~]#systemctl start mysql@bootstrap.service
#Look up the temporary root password
[root@pxc1 ~]#grep "temporary password" /var/log/mysqld.log
2020-10-18T02:53:54.292659Z 1 [Note] A temporary password is generated for root@localhost: =tWFP0oRJl8t
#Change the root password
[root@pxc1 ~]#mysql -uroot -p'=tWFP0oRJl8t'
mysql> alter user 'root'@'localhost' identified by 'magedu';
Query OK, 0 rows affected (0.01 sec)
#Create the SST user and grant privileges
mysql> CREATE USER 'sstuser'@'localhost' IDENTIFIED BY 's3cretPass';
Query OK, 0 rows affected (0.00 sec)
mysql> GRANT RELOAD, LOCK TABLES, PROCESS, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost';
Query OK, 0 rows affected (0.01 sec)
#Start all remaining nodes of the PXC cluster
[root@pxc2 ~]#systemctl start mysql
[root@pxc3 ~]#systemctl start mysql
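#Optional check (not in the original steps): on any node, wsrep_cluster_size should report 3 once all members have joined
[root@pxc1 ~]#mysql -uroot -pmagedu -e "show status like 'wsrep_cluster_size'"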
5. Deploy binary MySQL 8 with ansible
#Prepare the binary package
[root@centos8 ~]#ls /opt
mysql-8.0.15-linux-glibc2.12-x86_64.tar.xz
#Prepare the configuration file
[root@centos8 mysql]#cat /etc/my.cnf
[mysqld]
socket=/tmp/mysql.sock
user=mysql
symbolic-links=0
datadir=/data/mysql
innodb_file_per_table=1
log-bin
pid-file=/data/mysql/mysqld.pid
[client]
port=3306
socket=/tmp/mysql.sock
[mysqld_safe]
log-error=/var/log/mysqld.log
#Write the YAML playbook
[root@centos8 opt]#cat install_mysql8.yml
---
# install mysql-8.0.15-linux-glibc2.12-x86_64
- hosts: 10.0.0.18
  remote_user: root
  gather_facts: no
  tasks:
    - name: install packages
      yum: name=libaio*,ncurses*,libncurses*,libnuma*,numactl-libs
    - name: create mysql group
      group: name=mysql gid=306
    - name: create mysql user
      user: name=mysql uid=306 group=mysql shell=/sbin/nologin system=yes create_home=no
    - name: copy tar to remote host and file mode
      unarchive: src=/opt/mysql-8.0.15-linux-glibc2.12-x86_64.tar.xz dest=/usr/local/ owner=root group=root
    - name: create linkfile /usr/local/mysql
      file: src=/usr/local/mysql-8.0.15-linux-glibc2.12-x86_64 dest=/usr/local/mysql state=link
    - name: config my.cnf
      copy: src=/opt/my.cnf dest=/etc/my.cnf
    - name: data dir
      shell: /usr/local/mysql/bin/mysqld --user=mysql --basedir=/usr/local/mysql --datadir=/data/mysql --initialize-insecure
    - name: service script
      shell: /bin/cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
    - name: enable service
      shell: /etc/init.d/mysqld start;chkconfig --add mysqld;chkconfig mysqld on
    - name: PATH variable
      copy: content="PATH=/usr/local/mysql/bin:$PATH" dest=/etc/profile.d/mysql.sh mode=755
    - name: mysql link
      shell: ln -s /usr/local/mysql/bin/* /usr/sbin/
    - name: change mysql password
      shell: mysqladmin password 123456
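#Run the play (a sketch, not part of the original file): this assumes 10.0.0.18 is already listed in the ansible inventory and key-based ssh from the control node is in place
[root@centos8 opt]#ansible-playbook install_mysql8.yml --syntax-check
[root@centos8 opt]#ansible-playbook install_mysql8.yml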