shy丶gril 2016-05-24 3170浏览量
1
2
3
4
5
6
7
8
9
10
11
|
# Set up MySQL client library symlinks so dynamically linked tools
# (e.g. xtrabackup/innobackupex) can resolve libmysqlclient.
ln -s /usr/local/mysql/lib/libmysqlclient.so.18.0.0 /usr/lib64/libmysqlclient.so.18.0.0
ln -s /usr/local/mysql/lib/libmysqlclient.so.18.0.0 /usr/lib64/libmysqlclient.so.18
ln -s /usr/local/mysql/lib/libmysqlclient.so.18.0.0 /usr/lib64/libmysqlclient.so
# Verify the symlinks
ls -l /usr/lib64/libmysqlclient.so.18.0.0
ls -l /usr/lib64/libmysqlclient.so.18
ls -l /usr/lib64/libmysqlclient.so
# Register the MySQL library directories with the dynamic linker:
vim /etc/ld.so.conf.d/mysql-x86_64.conf
# Put these two lines into the file:
#   /usr/lib64/mysql
#   /usr/local/mysql/lib
# Then rebuild the linker cache
ldconfig
1
2
3
4
5
6
7
8
9
|
# Download and install Percona XtraBackup 2.2.12 from the binary tarball
cd
wget -c https://www.percona.com/downloads/XtraBackup/Percona-XtraBackup-2.2.12/binary/tarball/percona-xtrabackup-2.2.12-Linux-x86_64.tar.gz
tar zxf percona-xtrabackup-2.2.12-Linux-x86_64.tar.gz
cd percona-xtrabackup-2.2.12-Linux-x86_64/bin
# Perl runtime dependencies required by innobackupex
yum -y install perl perl-Time-HiRes
# Expose the tools on PATH via /usr/bin symlinks
ln -s /root/percona-xtrabackup-2.2.12-Linux-x86_64/bin/innobackupex /usr/bin/innobackupex
ln -s /root/percona-xtrabackup-2.2.12-Linux-x86_64/bin/xbcrypt /usr/bin/xbcrypt
ln -s /root/percona-xtrabackup-2.2.12-Linux-x86_64/bin/xbstream /usr/bin/xbstream
ln -s /root/percona-xtrabackup-2.2.12-Linux-x86_64/bin/xtrabackup /usr/bin/xtrabackup
|
1
|
# On Debian/Ubuntu, Percona XtraBackup can be installed directly from the package repositories.
apt-get -y install percona-xtrabackup
|
1
2
3
|
# Verify that binary logging (log-bin) and a unique server-id are configured —
# both are required on the master for replication.
egrep '(log-bin|server-id)' /etc/my.cnf
# Equivalent form:
grep -E '(log-bin|server-id)' /etc/my.cnf
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
|
# --- On the master ---
apt-get install percona-xtrabackup
# Keep a copy of the current MySQL configuration before touching anything
mv /etc/mysql/my.cnf /etc/mysql/my.cnf~ || mv /etc/mysql /etc/mysql~
service mysql status
# df -h
# du -sh /data/mysql/data
# Check disk usage and the size of the MySQL data directory (/data/mysql/data)
# to decide where on the slave server the scp'd files will fit.
cd /data
mkdir -p /data/mysql-master_data
# which innobackupex
# Run under nohup so a dropped SSH session does not kill the backup
nohup /usr/bin/innobackupex --user=root --password=password /data/mysql-master_data &
# On success a timestamp-named directory appears under /data/mysql-master_data,
# e.g. /data/mysql-master_data/2015-11-03_12-39-51
# Make sure the scp target user has write privileges on the destination.
# sshpass - noninteractive ssh password provider
apt-get -y install sshpass
# Again, consider nohup if an SSH disconnect is a concern
sshpass -ppassword scp -o StrictHostKeyChecking=no -P22 -r /data/mysql-master_data vivo@192.168.100.126:/data
rm -rf /data/mysql-master_data
|
1
2
3
4
5
6
7
8
9
|
# --- On the slave ---
# See "How XtraBackup works" (referenced below) for why --apply-log is
# needed: it replays the redo log so the backup is consistent.
innobackupex --apply-log /data/mysql-master_data/2015-11-03_12-39-51
# Inspect xtrabackup_binlog_info inside that directory and note the binlog
# file name and position — they are needed for CHANGE MASTER TO.
service mysql stop
# Empty the data directory. NOTE: no space before the glob — the original
# scraped text read "rm -rf /data/mysql/data/ *", which would also expand
# "*" in the current working directory.
rm -rf /data/mysql/data/*
innobackupex --copy-back /data/mysql-master_data/2015-11-03_12-39-51
chown mysql:mysql -R /data/mysql/data/
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
|
service mysql start
mysql -uroot -ppassword
# If the slave's my.cnf enables binary logging, turn on
# log_bin_trust_function_creators so stored functions without
# DETERMINISTIC/NO SQL declarations can still be created
# (see "MySQL Function Restrictions" for background).
SHOW VARIABLES LIKE '%func%';
SET GLOBAL log_bin_trust_function_creators=1;
SHOW VARIABLES LIKE '%func%';
-- Point the slave at the master using the binlog file/position recorded
-- from xtrabackup_binlog_info
CHANGE MASTER TO MASTER_HOST='192.168.100.132',MASTER_USER='root',MASTER_PASSWORD='password',MASTER_LOG_FILE='mysql-bin.000665',MASTER_LOG_POS=51145611;
START SLAVE;
SHOW SLAVE STATUS \G
-- Replication is healthy only when BOTH Slave_IO_Running: Yes and
-- Slave_SQL_Running: Yes are shown.
-- If the master had the event scheduler enabled, disable it on the slave:
-- a slave should not run events that perform writes.
SHOW VARIABLES LIKE 'event_scheduler';
SET GLOBAL event_scheduler=0;
SHOW VARIABLES LIKE 'event_scheduler';
QUIT;
rm -rf /data/mysql-master_data
# end
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
|
# --- Standalone full backup / restore with XtraBackup ---
# Backup
cd /data
mkdir -p /data/mysql-master_data
# nohup so a dropped SSH session does not kill the backup
nohup /usr/bin/innobackupex --user=root --password=password /data/mysql-master_data &
ls /data/mysql/data/ -alh
ls /etc/my.cnf -l
# Recovery
service mysql stop
service mysql status
# Replay the redo log so the backup is consistent before copying it back
innobackupex --apply-log /data/mysql-master_data/2015-11-03_11-21-10/
# Empty the data directory (no space before the glob!)
rm -rf /data/mysql/data/*
innobackupex --copy-back /data/mysql-master_data/2015-11-03_11-21-10/
chown mysql:mysql -R /data/mysql/data/
service mysql start
rm -rf /data/mysql-master_data
|
版权声明:本文内容由阿里云实名注册用户自发贡献,版权归原作者所有,阿里云开发者社区不拥有其著作权,亦不承担相应法律责任。具体规则请查看《阿里云开发者社区用户服务协议》和《阿里云开发者社区知识产权保护指引》。如果您发现本社区中有涉嫌抄袭的内容,填写侵权投诉表单进行举报,一经查实,本社区将立刻删除涉嫌侵权内容。
分享数据库前沿,解构实战干货,推动数据库技术变革