xtrabackup is a common tool for backing up and restoring MySQL, and in practice it is almost always driven by shell or Python wrapper scripts, especially on the backup side.
Restore is a different story: restoring from a full backup plus incrementals involves a series of error-prone manual steps (specifying each incremental backup in the right order, replaying binlogs, and so on), which is painful to do entirely by hand.
Even with a good memory and solid xtrabackup knowledge, a purely manual restore is easy to get wrong, and searching online I did not find a really usable automated restore script.
So I wrapped the xtrabackup backup and restore procedures in Python myself, which gives automated backups and automated point-in-time restores.
Using it requires some familiarity with xtrabackup: streaming backups, compressed backups, restore, binlog replay, and so on.
It was written in spare time; a quick test showed no problems, but the code is still rough and will be cleaned up later.
The backup side works as follows:
1. Streaming, compressed backups based on xtrabackup.
2. The backup taken on Saturday, or the very first backup at any time, is a full backup; every other backup is an incremental based on the previous backup.
3. A backup log list is generated, recording each backup's start time, end time, and path.
The effect: the first backup is always a full backup, no matter when it runs; after that, following the rules above, each run produces an incremental backup based on the most recent backup, and the backup log list is updated after every successful backup.
Implementation:
# -*- coding: utf-8 -*-
import os
import time
import datetime
import sys
import socket
import shutil
import logging

logging.basicConfig(level=logging.INFO
                    #handlers={logging.FileHandler(filename='backup_log_info.log', mode='a', encoding='utf-8')}
                    )

host = "127.0.0.1"
port = "7000"
user = "root"
password = "root"
cnf_file = "/usr/local/mysql57_data/mysql7000/etc/my.cnf"
backup_dir = "/usr/local/backupdata"
backupfilelist = os.path.join(backup_dir, "backupfilelist.log")
backup_keep_days = 15


# Decide the backup type: full backup on Saturday, incremental on other days,
# and a full backup whenever no previous backup exists.
def get_backup_type():
    backup_type = None
    if os.path.exists(backupfilelist):
        with open(backupfilelist, 'r') as f:
            lines = f.readlines()
        if lines:
            last_line = lines[-1]  # last backup record
            if last_line:
                if time.localtime().tm_wday == 5:  # Saturday (Monday is 0)
                    backup_type = "full"
                else:
                    backup_type = "incr"
            else:
                backup_type = "full"
        else:
            backup_type = "full"
    else:
        # first backup ever: create the list file and take a full backup
        open(backupfilelist, "a").close()
        backup_type = "full"
    return backup_type


# Return the directory of the most recent backup.
def get_last_backup():
    last_backup = None
    if os.path.exists(backupfilelist):
        with open(backupfilelist, 'r') as f:
            lines = f.readlines()
        last_line = lines[-1]  # last backup record
        if last_line:
            last_backup = os.path.join(backup_dir, last_line.split("|")[-1])
    return last_backup.replace("\n", "")


# Check whether the MySQL instance is reachable on its port.
def get_mysqlservice_status():
    mysql_stat = 0
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = s.connect_ex((host, int(port)))
    # port is open
    if result == 0:
        mysql_stat = 1
    return mysql_stat


# Remove backups older than backup_keep_days.
def clean_expired_file():
    for backup_name in os.listdir(backup_dir):
        if os.path.isdir(os.path.join(backup_dir, backup_name)):
            bak_datetime = datetime.datetime.strptime(backup_name.replace("_full", "").replace("_incr", ""), '%Y%m%d%H%M%S')
            if bak_datetime < datetime.datetime.now() - datetime.timedelta(days=backup_keep_days):
                shutil.rmtree(os.path.join(backup_dir, backup_name))


# Full backup.
def full_backup(backup_file_name):
    os.system("[ ! -d {0}/{1} ] && mkdir -p {0}/{1}".format(backup_dir, backup_file_name))
    logfile = os.path.join(backup_dir, backup_file_name, "backuplog.log")
    backup_commond = ''' innobackupex --defaults-file={0} --no-lock {1}/{6} --user={2} --password={3} --host="{4}" --port={5} --tmpdir={1}/{6} --stream=xbstream --compress --compress-threads=8 --parallel=4 --extra-lsndir={1}/{6} > {1}/{6}/{6}.xbstream 2>{7} '''.format(
        cnf_file, backup_dir, user, password, host, port, backup_file_name, logfile)
    execute_result = os.system(backup_commond)
    return execute_result


# Incremental backup.
def incr_backup(backup_file_name):
    os.system("[ ! -d {0}/{1} ] && mkdir -p {0}/{1}".format(backup_dir, backup_file_name))
    current_backup_dir = "{0}/{1}".format(backup_dir, backup_file_name)
    logfile = os.path.join(backup_dir, backup_file_name, "backuplog.log")
    # the incremental backup is based on the previous incremental/full backup
    incremental_basedir = get_last_backup()
    backup_commond = '''innobackupex --defaults-file={0} --no-lock {6} --user={2} --password={3} --host={4} --port={5} --stream=xbstream --tmpdir={6} --compress --compress-threads=8 --parallel=4 --extra-lsndir={6} --incremental --incremental-basedir={7} 2> {8} > {6}/{9}.xbstream '''.format(
        cnf_file, backup_dir, user, password, host, port, current_backup_dir, incremental_basedir, logfile, backup_file_name)
    # print(backup_commond)
    execute_result = os.system(backup_commond)
    return execute_result


# Rotate the binlog; originally meant to run after a full backup, of little use and currently unused.
def flush_log():
    flush_log_commond = ''' mysql -h{0} -u{1} -p{2} -P{3} -e "flush logs" '''.format(host, user, password, port)
    os.system(flush_log_commond)


if __name__ == '__main__':
    mysql_stat = get_mysqlservice_status()
    backup_type = get_backup_type()
    if mysql_stat <= 0:
        logging.info("mysql instance is inactive, backup exit")
        sys.exit(1)
    try:
        start_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "--------start backup")
        #flush_log()
        backup_file_name = start_time
        execute_result = None
        if backup_type == "full":
            backup_file_name = backup_file_name + "_full"
            logging.info("execute full backup......")
            execute_result = full_backup(backup_file_name)
            if execute_result == 0:
                logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "--------begin cleanup history backup")
                logging.info("execute cleanup backup history......")
                clean_expired_file()
                logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "--------finish cleanup history backup")
        else:
            backup_file_name = backup_file_name + "_incr"
            logging.info("execute incr backup......")
            execute_result = incr_backup(backup_file_name)
        if execute_result == 0:
            finish_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            backup_info = start_time + "|" + finish_time + "|" + start_time + "_" + backup_type
            with open(backupfilelist, 'a+') as f:
                f.write(backup_info + '\n')
            logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "--------finish backup")
        else:
            logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "--------xtrabackup failed, please check log")
    except:
        raise
        sys.exit(1)
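Because the script streams the backup through xbstream, the backup directory only contains the .xbstream file, the xtrabackup log, and the metadata written by --extra-lsndir. A quick way to sanity-check a run is to look at the xtrabackup_checkpoints file left in that directory. The sketch below is only an illustrative check I am adding here (it is not part of the script above), and it assumes the key = value format that XtraBackup 2.4 writes to xtrabackup_checkpoints (backup_type, from_lsn, to_lsn):

import os

backup_dir = "/usr/local/backupdata"

def read_checkpoints(backup_name):
    """Read the xtrabackup_checkpoints file written by --extra-lsndir for one backup."""
    info = {}
    path = os.path.join(backup_dir, backup_name, "xtrabackup_checkpoints")
    with open(path, 'r') as f:
        for line in f:
            if "=" in line:
                key, value = line.split("=", 1)
                info[key.strip()] = value.strip()
    return info

# e.g. read_checkpoints("20190801184335_incr") would be expected to show
# backup_type = incremental, with from_lsn equal to the previous backup's to_lsn.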
Put plainly, point-in-time (or position-based) restore here is essentially a process of locating the right files:
1. Any restore needs a full backup that was created before the restore point.
2. Starting from that full backup, the restore point is compared against the xtrabackup backup log to collect the incremental backups that are needed (zero, one, or more).
3. With the (full + incremental) backups found in the two steps above, the binlog position recorded by the last backup is used as the --start-position when replaying the first binlog;
the last-modified time of each binlog, compared with the restore point, decides which binlogs to use, and the last binlog is replayed with --stop-datetime set to the restore point.
1. How to find the latest full backup before the restore point
The backup script maintains a backup record file, backupfilelist.log here, holding each backup's start time, end time, backup type, and path; an illustrative example of its entries is shown below.
Scanning it by backup start time gives the most recent full backup that started before the restore point.
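For illustration, entries in backupfilelist.log look roughly like the following. These sample lines are reconstructed from how the backup script writes its records (start time | end time | backup directory name, which encodes the type); the end times are made up for this example and are not from a real run:

20190801184134|20190801184158|20190801184134_full
20190801184335|20190801184401|20190801184335_incr
20190801184518|20190801184544|20190801184518_incr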
2. How to find the incremental backups needed for the restore
Same idea as step 1: starting from that full backup, walk forward through the list and take each incremental backup in turn, up to the last one that started before the restore point; there may be one or more (or none).
3. How to decide which binlogs are needed after the incremental backups
Using the last-modified time of the binlog files themselves: starting from the time of the last incremental backup found in step 2, take each subsequent binlog; again there may be one or more. A minimal sketch of this selection follows.
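The binlog-selection step belongs to the part of the restore script that is cut off later in this post, so what follows is only a sketch of the idea, not the author's code. It assumes the last restored backup's end time is already available as a datetime, and it reuses the binlog directory and file-name prefix from the demo environment; all other names are my own:

import os
import datetime

binlog_dir = "/usr/local/mysql57_data/mysql7000/log/bin_log"   # from the demo output
binlog_prefix = "mysql_bin_1300"                               # from the demo output
stop_at = datetime.datetime.strptime("2019-08-01 18:50:59", '%Y-%m-%d %H:%M:%S')

def get_binlogs_for_restore(last_backup_end_time):
    """Pick the binlogs that may hold events between the last restored backup and stop_at.

    A binlog keeps being written after it is created, so its mtime is roughly the time
    of the last event in it; every binlog modified after the end of the last restored
    backup may still contain events that need to be replayed.
    """
    candidates = sorted(f for f in os.listdir(binlog_dir) if f.startswith(binlog_prefix + "."))
    needed = []
    for name in candidates:
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(binlog_dir, name)))
        if mtime < last_backup_end_time:
            # fully covered by the backups, nothing to replay from this file
            continue
        needed.append(name)
        if mtime >= stop_at:
            # this file already extends past the restore point; it is the last one
            # and will be replayed with --stop-datetime=stop_at
            break
    return needed

# With the demo data this should yield mysql_bin_1300.000001 and mysql_bin_1300.000002,
# matching the generated commands shown further down.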
Below is a demo of restoring the database to a point in time. Two rows are inserted and a backup is taken (the backup script above automatically chooses full or incremental); this is done three times.
After the three backups, two more rows are written, flush logs is executed, and then two further batches of two rows are written, so that the data ends up spread across different binlogs; finally all the data is deleted.
Then, using a point in time just before the delete, the restore script automatically generates the shell commands that restore the database; running them is all it takes.
Running the point-in-time script rextrabackup.py with the restore point "2019-08-01 18:50:59" (the moment just before the delete) produces the restore commands shown below.
In practice you would simply redirect them into a shell file and execute it, or run the commands directly from the Python script, to complete the restore automatically; here they are printed for illustration.
As you can see, the point-in-time restore finds exactly the files you would expect:
one full backup, two incremental backups, and part of the data in two binlogs;
the --start-position used for binlog replay lines up exactly with the position recorded by the last incremental backup, and replay of the last binlog stops at the specified point in time (how that position is obtained is sketched after the command listing).
The generated restore commands:
################uncompress backup file###################
innobackupex --apply-log --redo-only /temp/restoretmp/20190801184134_full
innobackupex --apply-log --redo-only /temp/restoretmp/20190801184134_full --incremental-dir=/temp/restoretmp/20190801184335_inc
innobackupex --apply-log --redo-only /temp/restoretmp/20190801184134_full --incremental-dir=/temp/restoretmp/20190801184518_inc
innobackupex --apply-log /temp/restoretmp/20190801184134_full
################stop mysql service###################
systemctl stop mysqld_7000
####################backup current database file###########################
mv /usr/local/mysql57_data/mysql7000/data /usr/local/mysql57_data/mysql7000/data_20190801185855
mkdir /usr/local/mysql57_data/mysql7000/data
chown -R mysql.mysql /usr/local/mysql57_data/mysql7000/data
################restore backup data###################
innobackupex --defaults-file=/usr/local/mysql57_data/mysql7000/etc/my.cnf --copy-back --rsync /temp/restoretmp/20190801184134_full
chown -R mysql.mysql /usr/local/mysql57_data/mysql7000/data
################start mysql service###################
systemctl start mysqld_7000
################restore data from binlog###################
cd /usr/local/mysql57_data/mysql7000/log/bin_log
mysqlbinlog mysql_bin_1300.000001 --skip-gtids=true --start-position=982 | mysql -h127.0.0.1 -uroot -proot -P7000
mysqlbinlog mysql_bin_1300.000002 --skip-gtids=true --stop-datetime="2019-08-01 18:50:59" | mysql -h127.0.0.1 -uroot -proot -P7000
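The --start-position=982 in the first mysqlbinlog command above has to come from the binlog coordinates recorded by the last restored (incremental) backup. The sketch below is an illustrative parser written for this post, not the author's code; it assumes the binlog_pos line format that XtraBackup 2.4 writes into xtrabackup_info, e.g. binlog_pos = filename 'mysql_bin_1300.000001', position '982':

import os
import re

def get_binlog_start_position(restored_backup_dir):
    """Parse the binlog file name and position from a decompressed backup's xtrabackup_info."""
    info_file = os.path.join(restored_backup_dir, "xtrabackup_info")
    with open(info_file, 'r') as f:
        for line in f:
            if line.split("=")[0].strip() == "binlog_pos":
                m = re.search(r"filename '([^']+)', position '?(\d+)'?", line)
                if m:
                    return m.group(1), int(m.group(2))
    return None, None

# e.g. get_binlog_start_position("/temp/restoretmp/20190801184518_inc") would be
# expected to return ("mysql_bin_1300.000001", 982) for the demo above.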
The restore script (rextrabackup.py) implementation:
# -*- coding: utf-8 -*-
import os
import time
import datetime
import sys
import socket
import logging

logging.basicConfig(level=logging.INFO
                    #handlers={logging.FileHandler(filename='restore_log_info.log', mode='a', encoding='utf-8')}
                    )

host = "127.0.0.1"
port = "7000"
user = "root"
password = "root"
instance_name = "mysqld_7000"
stop_at = "2019-08-01 18:50:59"
cnf_file = "/usr/local/mysql57_data/mysql7000/etc/my.cnf"
backup_dir = "/usr/local/backupdata/"
dest_dir = "/temp/restoretmp/"
xtrabackuplog_name = "backuplog.log"
backupfilelist = os.path.join(backup_dir, "backupfilelist.log")


# Read a value from the MySQL configuration file by key.
def get_config_value(key):
    value = None
    if not key:
        return value
    if os.path.exists(cnf_file):
        with open(cnf_file, 'r') as f:
            for line in f:
                if line.split("=")[0]:
                    if line[0:1] != "#" and line[0:1] != "[":
                        if key == line.split("=")[0].strip():
                            value = line.split("=")[1].strip()
    return value


def stop_mysql_service():
    print("################stop mysql service###################")
    print("systemctl stop {}".format(instance_name))


def start_mysql_service():
    print("################start mysql service###################")
    print("systemctl start {0}".format(instance_name))


# Return the latest full backup taken before stop_at, plus the incremental backups after it.
def get_restorefile_list():
    list_backup = []
    list_restore_file = []
    if os.path.exists(backupfilelist):
        with open(backupfilelist, 'r') as f:
            lines = f.readlines()
        for line in lines:
            list_backup.append(line.replace("\n", ""))
    if list_backup:
        for i in range(len(list_backup) - 1, -1, -1):
            list_restore_file.append(list_backup[i])
            backup_name = list_backup[i].split("|")[2]
            if "full" in backup_name:
                full_backup_time = list_backup[i].split("|")[1]
                if stop_at < full_backup_time:
                    break
        # restore files, oldest first
        list_restore_file.reverse()
    else:
        list_restore_file = None
    return list_restore_file


# Decompress the backups to restore: one full backup plus N incremental backups (N >= 0).
def uncompress_backup_file():
    print("################uncompress backup file###################")
    list_restore_backup = get_restorefile_list()
    # if there is no full backup taken before stop_at, the restore is impossible
    if not list_restore_backup:
        raise Exception("There is no backup that can be restored")
        exit(1)
    for restore_log in list_restore_backup:
        # unpack the xbstream file and decompress the .qp files
        backup_name = restore_log.split("|")[2]
        backup_path = restore_log.split("|")[2]
        backup_full_name = os.path.join(backup_dir, backup_path, backup_name)
        backup_path = os.path.join(backup_dir, restore_log.split("|")[-1])
        #print('''[ ! -d {0} ] && mkdir -p {0}'''.format(os.path.join(dest_dir, backup_name)))
        os.system('''[ ! -d {0} ] && mkdir -p {0}'''.format(os.path.join(dest_dir, backup_name)))
        #print("xbstream -x < {0}.xbstream -C {1}".format(backup_full_name, os.path.join(dest_dir, backup_name)))
        os.system("xbstream -x < {0}.xbstream -C {1}".format(backup_full_name, os.path.join(dest_dir, backup_name)))
        #print("cd {0}".format(os.path.join(dest_dir, backup_name)))
        os.system("cd {0}".format(os.path.join(dest_dir, backup_name)))
        #print('''for f in `find {0}/ -iname "*.qp"`; do qpress -dT4 $f $(dirname $f) && rm -f $f; done '''.format(os.path.join(dest_dir, backup_name)))
        os.system('''for f in `find {0}/ -iname "*.qp"`; do qpress -dT4 $f $(dirname $f) && rm -f $f; done'''.format(os.path.join(dest_dir, backup_name)))
        current_backup_begin_time = None
        current_backup_end_time = None
        # compare the current backup's times with stop_at: if the backup started before stop_at
        # and ended after it, stop decompressing further backups
        with open(os.path.join(dest_dir, backup_name, "xtrabackup_info"), 'r') as f:
            for line in f:
                if line and line.split("=")[0].strip() == "start_time":
                    current_backup_begin_time = line.split("=")[1].strip()
                if line and line.split("=")[0].strip() == "end_time":
                    current_backup_end_time = line.split("=")[1].strip()
        # ... (the remainder of the script is cut off in the original post)
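Since the script is cut off above, here is a minimal, illustrative sketch (not the author's code) of how the prepare commands shown in the generated output could be produced from the list of decompressed backup directories, oldest first; the directory names in the usage example are taken from the demo output:

import os

def build_prepare_commands(dest_dir, backup_dirs):
    """backup_dirs: decompressed backup directory names, the full backup first."""
    full_dir = os.path.join(dest_dir, backup_dirs[0])
    commands = ["innobackupex --apply-log --redo-only {0}".format(full_dir)]
    # each incremental is applied onto the full backup with --redo-only
    for incr in backup_dirs[1:]:
        commands.append("innobackupex --apply-log --redo-only {0} --incremental-dir={1}"
                        .format(full_dir, os.path.join(dest_dir, incr)))
    # the final --apply-log without --redo-only performs the rollback phase
    commands.append("innobackupex --apply-log {0}".format(full_dir))
    return commands

if __name__ == '__main__':
    for c in build_prepare_commands("/temp/restoretmp",
                                    ["20190801184134_full", "20190801184335_inc", "20190801184518_inc"]):
        print(c)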