diff --git a/.gitea/workflows/lint.yml b/.gitea/workflows/lint.yml
index 86d2078..754d7df 100644
--- a/.gitea/workflows/lint.yml
+++ b/.gitea/workflows/lint.yml
@@ -19,7 +19,6 @@ jobs:
       - run: pip install ruff
       - run: |
           ruff check .
-          ruff fix .
       # - uses: stefanzweifel/git-auto-commit-action@v4
       #   with:
       #     commit_message: 'style fixes by ruff'
diff --git a/.hadolint.yml b/.hadolint.yml
new file mode 100644
index 0000000..1005887
--- /dev/null
+++ b/.hadolint.yml
@@ -0,0 +1,13 @@
+DL3008failure-threshold: warning
+format: tty
+ignored:
+- DL3007
+override:
+  error:
+    - DL3015
+  warning:
+    - DL3015
+  info:
+    - DL3008
+  style:
+    - DL3015
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 2987976..19c744c 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,13 +5,11 @@ WORKDIR /opt/tisbackup
 COPY entrypoint.sh /entrypoint.sh
 COPY . /opt/tisbackup
 
-RUN apt update \
-    && apt install --no-install-recommends -y rsync ssh cron \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN /usr/local/bin/python3.12 -m pip install --no-cache-dir -r requirements.txt
-
-RUN mkdir -p /var/spool/cron/crontabs \
+RUN apt-get update \
+    && apt-get install --no-install-recommends -y rsync ssh cron \
+    && rm -rf /var/lib/apt/lists/* \
+    && /usr/local/bin/python3.12 -m pip install --no-cache-dir -r requirements.txt \
+    && mkdir -p /var/spool/cron/crontabs \
     && echo '59 03 * * * root /bin/bash /opt/tisbackup/backup.sh' > /etc/crontab \
     && echo '' >> /etc/crontab \
     && crontab /etc/crontab
diff --git a/tisbackup.py b/tisbackup.py
index 2788dc8..45c5fa8 100755
--- a/tisbackup.py
+++ b/tisbackup.py
@@ -23,8 +23,8 @@ import sys
 from os.path import isfile, join
 
 tisbackup_root_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.insert(0,os.path.join(tisbackup_root_dir,'lib'))
-sys.path.insert(0,os.path.join(tisbackup_root_dir,'libtisbackup'))
+sys.path.insert(0, os.path.join(tisbackup_root_dir, "lib"))
+sys.path.insert(0, os.path.join(tisbackup_root_dir, "libtisbackup"))
 
 import errno
 import logging
@@ -35,24 +35,26 @@ from optparse import OptionParser
 from iniparse import ConfigParser, ini
 
 from libtisbackup.backup_mysql import backup_mysql
-#from libtisbackup.backup_vmdk import backup_vmdk
-#from libtisbackup.backup_switch import backup_switch
+
+# from libtisbackup.backup_vmdk import backup_vmdk
+# from libtisbackup.backup_switch import backup_switch
 from libtisbackup.backup_null import backup_null
 from libtisbackup.backup_pgsql import backup_pgsql
 from libtisbackup.backup_rsync import backup_rsync, backup_rsync_ssh
-#from libtisbackup.backup_oracle import backup_oracle
-from libtisbackup.backup_rsync_btrfs import (backup_rsync__btrfs_ssh,
-                                             backup_rsync_btrfs)
-#from libtisbackup.backup_sqlserver import backup_sqlserver
+
+# from libtisbackup.backup_oracle import backup_oracle
+from libtisbackup.backup_rsync_btrfs import backup_rsync__btrfs_ssh, backup_rsync_btrfs
+
+# from libtisbackup.backup_sqlserver import backup_sqlserver
 from libtisbackup.backup_samba4 import backup_samba4
 from libtisbackup.backup_xcp_metadata import backup_xcp_metadata
 from libtisbackup.backup_xva import backup_xva
 from libtisbackup.common import *
 from libtisbackup.copy_vm_xcp import copy_vm_xcp
 
-__version__="2.0"
+__version__ = "2.0"
 
-usage="""\
+usage = """\
 %prog -c configfile action
 
 TIS Files Backup system.
@@ -67,52 +69,75 @@ action is either : exportbackup : copy lastest OK backups from local to location defned by --exportdir parameter register_existing : scan backup directories and add missing backups to database""" -version="VERSION" +version = "VERSION" + +parser = OptionParser(usage=usage, version="%prog " + version) +parser.add_option( + "-c", "--config", dest="config", default="/etc/tis/tisbackup-config.ini", help="Config file full path (default: %default)" +) +parser.add_option("-d", "--dry-run", dest="dry_run", default=False, action="store_true", help="Dry run (default: %default)") +parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="More information (default: %default)") +parser.add_option( + "-s", "--sections", dest="sections", default="", help="Comma separated list of sections (backups) to process (default: All)" +) +parser.add_option( + "-l", + "--loglevel", + dest="loglevel", + default="info", + type="choice", + choices=["debug", "warning", "info", "error", "critical"], + metavar="LOGLEVEL", + help="Loglevel (default: %default)", +) +parser.add_option("-n", "--len", dest="statscount", default=30, type="int", help="Number of lines to list for dumpstat (default: %default)") +parser.add_option( + "-b", + "--backupdir", + dest="backup_base_dir", + default="", + help="Base directory for all backups (default: [global] backup_base_dir in config file)", +) +parser.add_option( + "-x", "--exportdir", dest="exportdir", default="", help="Directory where to export latest backups with exportbackup (nodefault)" +) -parser=OptionParser(usage=usage,version="%prog " + version) -parser.add_option("-c","--config", dest="config", default='/etc/tis/tisbackup-config.ini', help="Config file full path (default: %default)") -parser.add_option("-d","--dry-run", dest="dry_run", default=False, action='store_true', help="Dry run (default: %default)") -parser.add_option("-v","--verbose", dest="verbose", default=False, action='store_true', help="More information (default: %default)") -parser.add_option("-s","--sections", dest="sections", default='', help="Comma separated list of sections (backups) to process (default: All)") -parser.add_option("-l","--loglevel", dest="loglevel", default='info', type='choice', choices=['debug','warning','info','error','critical'], metavar='LOGLEVEL',help="Loglevel (default: %default)") -parser.add_option("-n","--len", dest="statscount", default=30, type='int', help="Number of lines to list for dumpstat (default: %default)") -parser.add_option("-b","--backupdir", dest="backup_base_dir", default='', help="Base directory for all backups (default: [global] backup_base_dir in config file)") -parser.add_option("-x","--exportdir", dest="exportdir", default='', help="Directory where to export latest backups with exportbackup (nodefault)") class tis_backup: - logger = logging.getLogger('tisbackup') + logger = logging.getLogger("tisbackup") - def __init__(self,dry_run=False,verbose=False,backup_base_dir=''): + def __init__(self, dry_run=False, verbose=False, backup_base_dir=""): self.dry_run = dry_run self.verbose = verbose self.backup_base_dir = backup_base_dir - self.backup_base_dir = '' + self.backup_base_dir = "" self.backup_list = [] self.dry_run = dry_run - self.verbose=False + self.verbose = False - def read_ini_file(self,filename): + def read_ini_file(self, filename): ini.change_comment_syntax() cp = ConfigParser() cp.read(filename) if not self.backup_base_dir: - self.backup_base_dir = cp.get('global','backup_base_dir') + 
self.backup_base_dir = cp.get("global", "backup_base_dir") if not os.path.isdir(self.backup_base_dir): - self.logger.info('Creating backup directory %s' % self.backup_base_dir) + self.logger.info("Creating backup directory %s" % self.backup_base_dir) os.makedirs(self.backup_base_dir) self.logger.debug("backup directory : " + self.backup_base_dir) - self.dbstat = BackupStat(os.path.join(self.backup_base_dir,'log','tisbackup.sqlite')) + self.dbstat = BackupStat(os.path.join(self.backup_base_dir, "log", "tisbackup.sqlite")) for section in cp.sections(): - if (section != 'global'): + if section != "global": self.logger.debug("reading backup config " + section) backup_item = None - type = cp.get(section,'type') + type = cp.get(section, "type") - backup_item = backup_drivers[type](backup_name=section, - backup_dir=os.path.join(self.backup_base_dir,section),dbstat=self.dbstat,dry_run=self.dry_run) + backup_item = backup_drivers[type]( + backup_name=section, backup_dir=os.path.join(self.backup_base_dir, section), dbstat=self.dbstat, dry_run=self.dry_run + ) backup_item.read_config(cp) backup_item.verbose = self.verbose @@ -122,35 +147,34 @@ class tis_backup: # TODO socket.gethostbyaddr('64.236.16.20') # TODO limit backup to one backup on the command line - - def checknagios(self,sections=[]): + def checknagios(self, sections=[]): try: if not sections: sections = [backup_item.backup_name for backup_item in self.backup_list] - self.logger.debug('Start of check nagios for %s' % (','.join(sections),)) + self.logger.debug("Start of check nagios for %s" % (",".join(sections),)) try: worst_nagiosstatus = None ok = [] warning = [] critical = [] unknown = [] - nagiosoutput = '' + nagiosoutput = "" for backup_item in self.backup_list: if not sections or backup_item.backup_name in sections: - (nagiosstatus,log) = backup_item.checknagios() + (nagiosstatus, log) = backup_item.checknagios() if nagiosstatus == nagiosStateCritical: - critical.append((backup_item.backup_name,log)) - elif nagiosstatus == nagiosStateWarning : - warning.append((backup_item.backup_name,log)) + critical.append((backup_item.backup_name, log)) + elif nagiosstatus == nagiosStateWarning: + warning.append((backup_item.backup_name, log)) elif nagiosstatus == nagiosStateOk: - ok.append((backup_item.backup_name,log)) + ok.append((backup_item.backup_name, log)) else: - unknown.append((backup_item.backup_name,log)) - self.logger.debug('[%s] nagios:"%i" log: %s',backup_item.backup_name,nagiosstatus,log) + unknown.append((backup_item.backup_name, log)) + self.logger.debug('[%s] nagios:"%i" log: %s', backup_item.backup_name, nagiosstatus, log) if not ok and not critical and not unknown and not warning: - self.logger.debug('Nothing processed') + self.logger.debug("Nothing processed") worst_nagiosstatus = nagiosStateUnknown nagiosoutput = 'UNKNOWN : Unknown backup sections "%s"' % sections @@ -159,155 +183,154 @@ class tis_backup: if unknown: if not worst_nagiosstatus: worst_nagiosstatus = nagiosStateUnknown - nagiosoutput = 'UNKNOWN status backups %s' % (','.join([b[0] for b in unknown])) + nagiosoutput = "UNKNOWN status backups %s" % (",".join([b[0] for b in unknown])) globallog.extend(unknown) if critical: if not worst_nagiosstatus: worst_nagiosstatus = nagiosStateCritical - nagiosoutput = 'CRITICAL backups %s' % (','.join([b[0] for b in critical])) + nagiosoutput = "CRITICAL backups %s" % (",".join([b[0] for b in critical])) globallog.extend(critical) if warning: if not worst_nagiosstatus: worst_nagiosstatus = nagiosStateWarning - nagiosoutput = 
'WARNING backups %s' % (','.join([b[0] for b in warning])) + nagiosoutput = "WARNING backups %s" % (",".join([b[0] for b in warning])) globallog.extend(warning) if ok: if not worst_nagiosstatus: worst_nagiosstatus = nagiosStateOk - nagiosoutput = 'OK backups %s' % (','.join([b[0] for b in ok])) + nagiosoutput = "OK backups %s" % (",".join([b[0] for b in ok])) globallog.extend(ok) if worst_nagiosstatus == nagiosStateOk: - nagiosoutput = 'ALL backups OK %s' % (','.join(sections)) - + nagiosoutput = "ALL backups OK %s" % (",".join(sections)) except BaseException as e: worst_nagiosstatus = nagiosStateCritical - nagiosoutput = 'EXCEPTION',"Critical : %s" % str(e) + nagiosoutput = "EXCEPTION", "Critical : %s" % str(e) raise finally: - self.logger.debug('worst nagios status :"%i"',worst_nagiosstatus) - print('%s (tisbackup V%s)' %(nagiosoutput,version)) - print('\n'.join(["[%s]:%s" % (log_elem[0],log_elem[1]) for log_elem in globallog])) + self.logger.debug('worst nagios status :"%i"', worst_nagiosstatus) + print("%s (tisbackup V%s)" % (nagiosoutput, version)) + print("\n".join(["[%s]:%s" % (log_elem[0], log_elem[1]) for log_elem in globallog])) sys.exit(worst_nagiosstatus) - def process_backup(self,sections=[]): + def process_backup(self, sections=[]): processed = [] errors = [] if not sections: sections = [backup_item.backup_name for backup_item in self.backup_list] - self.logger.info('Processing backup for %s' % (','.join(sections)) ) + self.logger.info("Processing backup for %s" % (",".join(sections))) for backup_item in self.backup_list: if not sections or backup_item.backup_name in sections: try: - assert(isinstance(backup_item,backup_generic)) - self.logger.info('Processing [%s]',(backup_item.backup_name)) + assert isinstance(backup_item, backup_generic) + self.logger.info("Processing [%s]", (backup_item.backup_name)) stats = backup_item.process_backup() - processed.append((backup_item.backup_name,stats)) + processed.append((backup_item.backup_name, stats)) except BaseException as e: - self.logger.critical('Backup [%s] processed with error : %s',backup_item.backup_name,e) - errors.append((backup_item.backup_name,str(e))) + self.logger.critical("Backup [%s] processed with error : %s", backup_item.backup_name, e) + errors.append((backup_item.backup_name, str(e))) if not processed and not errors: - self.logger.critical('No backup properly finished or processed') + self.logger.critical("No backup properly finished or processed") else: if processed: - self.logger.info('Backup processed : %s' , ",".join([b[0] for b in processed])) + self.logger.info("Backup processed : %s", ",".join([b[0] for b in processed])) if errors: - self.logger.error('Backup processed with errors: %s' , ",".join([b[0] for b in errors])) + self.logger.error("Backup processed with errors: %s", ",".join([b[0] for b in errors])) - def export_backups(self,sections=[],exportdir=''): + def export_backups(self, sections=[], exportdir=""): processed = [] errors = [] if not sections: sections = [backup_item.backup_name for backup_item in self.backup_list] - self.logger.info('Exporting OK backups for %s to %s' % (','.join(sections),exportdir) ) + self.logger.info("Exporting OK backups for %s to %s" % (",".join(sections), exportdir)) for backup_item in self.backup_list: if backup_item.backup_name in sections: try: - assert(isinstance(backup_item,backup_generic)) - self.logger.info('Processing [%s]',(backup_item.backup_name)) + assert isinstance(backup_item, backup_generic) + self.logger.info("Processing [%s]", 
(backup_item.backup_name)) stats = backup_item.export_latestbackup(destdir=exportdir) - processed.append((backup_item.backup_name,stats)) + processed.append((backup_item.backup_name, stats)) except BaseException as e: - self.logger.critical('Export Backup [%s] processed with error : %s',backup_item.backup_name,e) - errors.append((backup_item.backup_name,str(e))) + self.logger.critical("Export Backup [%s] processed with error : %s", backup_item.backup_name, e) + errors.append((backup_item.backup_name, str(e))) if not processed and not errors: - self.logger.critical('No export backup properly finished or processed') + self.logger.critical("No export backup properly finished or processed") else: if processed: - self.logger.info('Export Backups processed : %s' , ",".join([b[0] for b in processed])) + self.logger.info("Export Backups processed : %s", ",".join([b[0] for b in processed])) if errors: - self.logger.error('Export Backups processed with errors: %s' , ",".join([b[0] for b in errors])) + self.logger.error("Export Backups processed with errors: %s", ",".join([b[0] for b in errors])) - def retry_failed_backups(self,maxage_hours=30): + def retry_failed_backups(self, maxage_hours=30): processed = [] errors = [] # before mindate, backup is too old mindate = datetime2isodate((datetime.datetime.now() - datetime.timedelta(hours=maxage_hours))) - failed_backups = self.dbstat.query("""\ + failed_backups = self.dbstat.query( + """\ select distinct backup_name as bname from stats - where status="OK" and backup_start>=?""",(mindate,)) - - - defined_backups = list(map(lambda f:f.backup_name, [ x for x in self.backup_list if not isinstance(x, backup_null) ])) - failed_backups_names = set(defined_backups) - set([b['bname'] for b in failed_backups if b['bname'] in defined_backups]) + where status="OK" and backup_start>=?""", + (mindate,), + ) + defined_backups = list(map(lambda f: f.backup_name, [x for x in self.backup_list if not isinstance(x, backup_null)])) + failed_backups_names = set(defined_backups) - set([b["bname"] for b in failed_backups if b["bname"] in defined_backups]) if failed_backups_names: - self.logger.info('Processing backup for %s',','.join(failed_backups_names)) + self.logger.info("Processing backup for %s", ",".join(failed_backups_names)) for backup_item in self.backup_list: if backup_item.backup_name in failed_backups_names: try: - assert(isinstance(backup_item,backup_generic)) - self.logger.info('Processing [%s]',(backup_item.backup_name)) + assert isinstance(backup_item, backup_generic) + self.logger.info("Processing [%s]", (backup_item.backup_name)) stats = backup_item.process_backup() - processed.append((backup_item.backup_name,stats)) + processed.append((backup_item.backup_name, stats)) except BaseException as e: - self.logger.critical('Backup [%s] not processed, error : %s',backup_item.backup_name,e) - errors.append((backup_item.backup_name,str(e))) + self.logger.critical("Backup [%s] not processed, error : %s", backup_item.backup_name, e) + errors.append((backup_item.backup_name, str(e))) if not processed and not errors: - self.logger.critical('No backup properly finished or processed') + self.logger.critical("No backup properly finished or processed") else: if processed: - self.logger.info('Backup processed : %s' , ",".join([b[0] for b in errors])) + self.logger.info("Backup processed : %s", ",".join([b[0] for b in errors])) if errors: - self.logger.error('Backup processed with errors: %s' , ",".join([b[0] for b in errors])) + self.logger.error("Backup processed with 
errors: %s", ",".join([b[0] for b in errors])) else: - self.logger.info('No recent failed backups found in database') + self.logger.info("No recent failed backups found in database") - - def cleanup_backup_section(self,sections = []): + def cleanup_backup_section(self, sections=[]): processed = False if not sections: sections = [backup_item.backup_name for backup_item in self.backup_list] - self.logger.info('Processing cleanup for %s' % (','.join(sections)) ) + self.logger.info("Processing cleanup for %s" % (",".join(sections))) for backup_item in self.backup_list: if backup_item.backup_name in sections: try: - assert(isinstance(backup_item,backup_generic)) - self.logger.info('Processing cleanup of [%s]',(backup_item.backup_name)) + assert isinstance(backup_item, backup_generic) + self.logger.info("Processing cleanup of [%s]", (backup_item.backup_name)) backup_item.cleanup_backup() processed = True except BaseException as e: - self.logger.critical('Cleanup of [%s] not processed, error : %s',backup_item.backup_name,e) + self.logger.critical("Cleanup of [%s] not processed, error : %s", backup_item.backup_name, e) if not processed: - self.logger.critical('No cleanup properly finished or processed') + self.logger.critical("No cleanup properly finished or processed") - def register_existingbackups(self,sections = []): + def register_existingbackups(self, sections=[]): if not sections: sections = [backup_item.backup_name for backup_item in self.backup_list] - self.logger.info('Append existing backups to database...') + self.logger.info("Append existing backups to database...") for backup_item in self.backup_list: if backup_item.backup_name in sections: backup_item.register_existingbackups() @@ -315,26 +338,26 @@ class tis_backup: def html_report(self): for backup_item in self.backup_list: if not section or section == backup_item.backup_name: - assert(isinstance(backup_item,backup_generic)) + assert isinstance(backup_item, backup_generic) if not maxage_hours: maxage_hours = backup_item.maximum_backup_age - (nagiosstatus,log) = backup_item.checknagios(maxage_hours=maxage_hours) - globallog.append('[%s] %s' % (backup_item.backup_name,log)) - self.logger.debug('[%s] nagios:"%i" log: %s',backup_item.backup_name,nagiosstatus,log) - #processed = True + (nagiosstatus, log) = backup_item.checknagios(maxage_hours=maxage_hours) + globallog.append("[%s] %s" % (backup_item.backup_name, log)) + self.logger.debug('[%s] nagios:"%i" log: %s', backup_item.backup_name, nagiosstatus, log) + # processed = True # if nagiosstatus >= worst_nagiosstatus: # worst_nagiosstatus = nagiosstatus def main(): - (options,args)=parser.parse_args() + (options, args) = parser.parse_args() if len(args) != 1: print("ERROR : You must provide one action to perform") parser.print_usage() sys.exit(2) - backup_start_date = datetime.datetime.now().strftime('%Y%m%d-%Hh%Mm%S') + backup_start_date = datetime.datetime.now().strftime("%Y%m%d-%Hh%Mm%S") # options action = args[0] @@ -343,23 +366,23 @@ def main(): print(backup_drivers[t].get_help()) sys.exit(0) - config_file =options.config + config_file = options.config dry_run = options.dry_run verbose = options.verbose loglevel = options.loglevel # setup Logger - logger = logging.getLogger('tisbackup') + logger = logging.getLogger("tisbackup") hdlr = logging.StreamHandler() - hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) logger.addHandler(hdlr) # set loglevel - if loglevel in 
('debug','warning','info','error','critical'): + if loglevel in ("debug", "warning", "info", "error", "critical"): numeric_level = getattr(logging, loglevel.upper(), None) if not isinstance(numeric_level, int): - raise ValueError('Invalid log level: %s' % loglevel) + raise ValueError("Invalid log level: %s" % loglevel) logger.setLevel(numeric_level) # Config file @@ -370,28 +393,28 @@ def main(): cp = ConfigParser() cp.read(config_file) - backup_base_dir = options.backup_base_dir or cp.get('global','backup_base_dir') - log_dir = os.path.join(backup_base_dir,'log') + backup_base_dir = options.backup_base_dir or cp.get("global", "backup_base_dir") + log_dir = os.path.join(backup_base_dir, "log") if not os.path.exists(log_dir): os.makedirs(log_dir) # if we run the nagios check, we don't create log file, everything is piped to stdout - if action!='checknagios': + if action != "checknagios": try: - hdlr = logging.FileHandler(os.path.join(log_dir,'tisbackup_%s.log' % (backup_start_date))) - hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) + hdlr = logging.FileHandler(os.path.join(log_dir, "tisbackup_%s.log" % (backup_start_date))) + hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) logger.addHandler(hdlr) except IOError as e: - if action == 'cleanup' and e.errno == errno.ENOSPC: + if action == "cleanup" and e.errno == errno.ENOSPC: logger.warning("No space left on device, disabling file logging.") else: raise e # Main - backup = tis_backup(dry_run=dry_run,verbose=verbose,backup_base_dir=backup_base_dir) + backup = tis_backup(dry_run=dry_run, verbose=verbose, backup_base_dir=backup_base_dir) backup.read_ini_file(config_file) - backup_sections = options.sections.split(',') if options.sections else [] + backup_sections = options.sections.split(",") if options.sections else [] all_sections = [backup_item.backup_name for backup_item in backup.backup_list] if not backup_sections: @@ -399,7 +422,7 @@ def main(): else: for b in backup_sections: if b not in all_sections: - raise Exception('Section %s is not defined in config file' % b) + raise Exception("Section %s is not defined in config file" % b) if dry_run: logger.warning("WARNING : DRY RUN, nothing will be done, just printing on screen...") @@ -408,23 +431,22 @@ def main(): backup.process_backup(backup_sections) elif action == "exportbackup": if not options.exportdir: - raise Exception('No export directory supplied dor exportbackup action') - backup.export_backups(backup_sections,options.exportdir) + raise Exception("No export directory supplied dor exportbackup action") + backup.export_backups(backup_sections, options.exportdir) elif action == "cleanup": backup.cleanup_backup_section(backup_sections) elif action == "checknagios": backup.checknagios(backup_sections) elif action == "dumpstat": for s in backup_sections: - backup.dbstat.last_backups(s,count=options.statscount) + backup.dbstat.last_backups(s, count=options.statscount) elif action == "retryfailed": backup.retry_failed_backups() elif action == "register_existing": backup.register_existingbackups(backup_sections) - else: - logger.error('Unhandled action "%s", quitting...',action) + logger.error('Unhandled action "%s", quitting...', action) sys.exit(1) diff --git a/tisbackup_gui.py b/tisbackup_gui.py index ea99d99..12a51c6 100755 --- a/tisbackup_gui.py +++ b/tisbackup_gui.py @@ -22,8 +22,8 @@ import sys from os.path import isfile, join tisbackup_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__))) 
-sys.path.append(os.path.join(tisbackup_root_dir,'lib')) -sys.path.append(os.path.join(tisbackup_root_dir,'libtisbackup')) +sys.path.append(os.path.join(tisbackup_root_dir, "lib")) +sys.path.append(os.path.join(tisbackup_root_dir, "libtisbackup")) import glob @@ -34,9 +34,7 @@ import time from shutil import * from urllib.parse import urlparse -from flask import (Flask, Response, abort, appcontext_pushed, flash, g, - jsonify, redirect, render_template, request, session, - url_for) +from flask import Flask, Response, abort, appcontext_pushed, flash, g, jsonify, redirect, render_template, request, session, url_for from iniparse import ConfigParser, RawConfigParser from config import huey @@ -47,61 +45,61 @@ from tisbackup import tis_backup cp = ConfigParser() cp.read("/etc/tis/tisbackup_gui.ini") -CONFIG = cp.get('general','config_tisbackup').split(",") -SECTIONS = cp.get('general','sections') -ADMIN_EMAIL = cp.get('general','ADMIN_EMAIL') -BASE_DIR = cp.get('general','base_config_dir') +CONFIG = cp.get("general", "config_tisbackup").split(",") +SECTIONS = cp.get("general", "sections") +ADMIN_EMAIL = cp.get("general", "ADMIN_EMAIL") +BASE_DIR = cp.get("general", "base_config_dir") -tisbackup_config_file= CONFIG[0] -config_number=0 +tisbackup_config_file = CONFIG[0] +config_number = 0 cp = ConfigParser() cp.read(tisbackup_config_file) -backup_base_dir = cp.get('global','backup_base_dir') -dbstat = BackupStat(os.path.join(backup_base_dir,'log','tisbackup.sqlite')) +backup_base_dir = cp.get("global", "backup_base_dir") +dbstat = BackupStat(os.path.join(backup_base_dir, "log", "tisbackup.sqlite")) mindate = None error = None info = None app = Flask(__name__) -app.secret_key = 'fsiqefiuqsefARZ4Zfesfe34234dfzefzfe' -app.config['PROPAGATE_EXCEPTIONS'] = True +app.secret_key = "fsiqefiuqsefARZ4Zfesfe34234dfzefzfe" +app.config["PROPAGATE_EXCEPTIONS"] = True -tasks_db = os.path.join(tisbackup_root_dir,"tasks.sqlite") +tasks_db = os.path.join(tisbackup_root_dir, "tasks.sqlite") def read_all_configs(base_dir): raw_configs = [] list_config = [] - #config_base_dir = base_dir - + # config_base_dir = base_dir + for file in os.listdir(base_dir): - if isfile(join(base_dir,file)): - raw_configs.append(join(base_dir,file)) - + if isfile(join(base_dir, file)): + raw_configs.append(join(base_dir, file)) + for elem in raw_configs: line = open(elem).readline() - if 'global' in line: + if "global" in line: list_config.append(elem) backup_dict = {} - backup_dict['rsync_ssh_list'] = [] - backup_dict['rsync_btrfs_list'] = [] - backup_dict['rsync_list'] = [] - backup_dict['null_list'] = [] - backup_dict['pgsql_list'] = [] - backup_dict['mysql_list'] = [] - #backup_dict['sqlserver_list'] = [] - backup_dict['xva_list'] = [] - backup_dict['metadata_list'] = [] - #backup_dict['switch_list'] = [] - #backup_dict['oracle_list'] = [] + backup_dict["rsync_ssh_list"] = [] + backup_dict["rsync_btrfs_list"] = [] + backup_dict["rsync_list"] = [] + backup_dict["null_list"] = [] + backup_dict["pgsql_list"] = [] + backup_dict["mysql_list"] = [] + # backup_dict['sqlserver_list'] = [] + backup_dict["xva_list"] = [] + backup_dict["metadata_list"] = [] + # backup_dict['switch_list'] = [] + # backup_dict['oracle_list'] = [] result = [] cp = ConfigParser() for config_file in list_config: cp.read(config_file) - backup_base_dir = cp.get('global', 'backup_base_dir') + backup_base_dir = cp.get("global", "backup_base_dir") backup = tis_backup(backup_base_dir=backup_base_dir) backup.read_ini_file(config_file) @@ -113,7 +111,7 @@ def 
read_all_configs(base_dir): else: for b in backup_sections: if b not in all_sections: - raise Exception('Section %s is not defined in config file' % b) + raise Exception("Section %s is not defined in config file" % b) # never used.. # if not backup_sections: @@ -128,35 +126,28 @@ def read_all_configs(base_dir): result.append(b) for row in result: - backup_name = row['backup_name'] - server_name = row['server_name'] - backup_type = row['type'] + backup_name = row["backup_name"] + server_name = row["server_name"] + backup_type = row["type"] if backup_type == "xcp-dump-metadata": - backup_dict['metadata_list'].append( - [server_name, backup_name, backup_type, ""]) + backup_dict["metadata_list"].append([server_name, backup_name, backup_type, ""]) if backup_type == "rsync+ssh": - remote_dir = row['remote_dir'] - backup_dict['rsync_ssh_list'].append( - [server_name, backup_name, backup_type, remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_ssh_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "rsync+btrfs+ssh": - remote_dir = row['remote_dir'] - backup_dict['rsync_btrfs_list'].append( - [server_name, backup_name, backup_type, remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_btrfs_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "rsync": - remote_dir = row['remote_dir'] - backup_dict['rsync_list'].append( - [server_name, backup_name, backup_type, remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "null": - backup_dict['null_list'].append( - [server_name, backup_name, backup_type, ""]) + backup_dict["null_list"].append([server_name, backup_name, backup_type, ""]) if backup_type == "pgsql+ssh": - db_name = row['db_name'] if len(row['db_name']) > 0 else '*' - backup_dict['pgsql_list'].append( - [server_name, backup_name, backup_type, db_name]) + db_name = row["db_name"] if len(row["db_name"]) > 0 else "*" + backup_dict["pgsql_list"].append([server_name, backup_name, backup_type, db_name]) if backup_type == "mysql+ssh": - db_name = row['db_name'] if len(row['db_name']) > 0 else '*' - backup_dict['mysql_list'].append( - [server_name, backup_name, backup_type, db_name]) + db_name = row["db_name"] if len(row["db_name"]) > 0 else "*" + backup_dict["mysql_list"].append([server_name, backup_name, backup_type, db_name]) # if backup_type == "sqlserver+ssh": # db_name = row['db_name'] # backup_dict['sqlserver_list'].append( @@ -166,12 +157,11 @@ def read_all_configs(base_dir): # backup_dict['oracle_list'].append( # [server_name, backup_name, backup_type, db_name]) if backup_type == "xen-xva": - backup_dict['xva_list'].append( - [server_name, backup_name, backup_type, ""]) + backup_dict["xva_list"].append([server_name, backup_name, backup_type, ""]) # if backup_type == "switch": # backup_dict['switch_list'].append( # [server_name, backup_name, backup_type, ""]) - + return backup_dict @@ -180,7 +170,7 @@ def read_config(): cp = ConfigParser() cp.read(config_file) - backup_base_dir = cp.get('global','backup_base_dir') + backup_base_dir = cp.get("global", "backup_base_dir") backup = tis_backup(backup_base_dir=backup_base_dir) backup.read_ini_file(config_file) @@ -192,10 +182,10 @@ def read_config(): else: for b in backup_sections: if b not in all_sections: - raise Exception('Section %s is not defined in config file' % b) + raise Exception("Section %s is not defined in config file" % b) result = [] - + # not 
used ... # if not backup_sections: # sections = [backup_item.backup_name for backup_item in backup.backup_list] @@ -203,46 +193,46 @@ def read_config(): for backup_item in backup.backup_list: if backup_item.backup_name in backup_sections: b = {} - for attrib_name in backup_item.required_params+backup_item.optional_params: - if hasattr(backup_item,attrib_name): - b[attrib_name] = getattr(backup_item,attrib_name) + for attrib_name in backup_item.required_params + backup_item.optional_params: + if hasattr(backup_item, attrib_name): + b[attrib_name] = getattr(backup_item, attrib_name) result.append(b) backup_dict = {} - backup_dict['rsync_ssh_list'] = [] - backup_dict['rsync_btrfs_list'] = [] - backup_dict['rsync_list'] = [] - backup_dict['null_list'] = [] - backup_dict['pgsql_list'] = [] - backup_dict['mysql_list'] = [] - #backup_dict['sqlserver_list'] = [] - backup_dict['xva_list'] = [] - backup_dict['metadata_list'] = [] - #backup_dict['switch_list'] = [] - #backup_dict['oracle_list'] = [] + backup_dict["rsync_ssh_list"] = [] + backup_dict["rsync_btrfs_list"] = [] + backup_dict["rsync_list"] = [] + backup_dict["null_list"] = [] + backup_dict["pgsql_list"] = [] + backup_dict["mysql_list"] = [] + # backup_dict['sqlserver_list'] = [] + backup_dict["xva_list"] = [] + backup_dict["metadata_list"] = [] + # backup_dict['switch_list'] = [] + # backup_dict['oracle_list'] = [] for row in result: - backup_name = row['backup_name'] - server_name = row['server_name'] - backup_type = row['type'] + backup_name = row["backup_name"] + server_name = row["server_name"] + backup_type = row["type"] if backup_type == "xcp-dump-metadata": - backup_dict['metadata_list'].append([server_name, backup_name, backup_type, ""]) + backup_dict["metadata_list"].append([server_name, backup_name, backup_type, ""]) if backup_type == "rsync+ssh": - remote_dir = row['remote_dir'] - backup_dict['rsync_ssh_list'].append([server_name, backup_name, backup_type,remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_ssh_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "rsync+btrfs+ssh": - remote_dir = row['remote_dir'] - backup_dict['rsync_btrfs_list'].append([server_name, backup_name, backup_type,remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_btrfs_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "rsync": - remote_dir = row['remote_dir'] - backup_dict['rsync_list'].append([server_name, backup_name, backup_type,remote_dir]) + remote_dir = row["remote_dir"] + backup_dict["rsync_list"].append([server_name, backup_name, backup_type, remote_dir]) if backup_type == "null": - backup_dict['null_list'].append([server_name, backup_name, backup_type, ""]) + backup_dict["null_list"].append([server_name, backup_name, backup_type, ""]) if backup_type == "pgsql+ssh": - db_name = row['db_name'] if len(row['db_name']) > 0 else '*' - backup_dict['pgsql_list'].append([server_name, backup_name, backup_type, db_name]) + db_name = row["db_name"] if len(row["db_name"]) > 0 else "*" + backup_dict["pgsql_list"].append([server_name, backup_name, backup_type, db_name]) if backup_type == "mysql+ssh": - db_name = row['db_name'] if len(row['db_name']) > 0 else '*' - backup_dict['mysql_list'].append([server_name, backup_name, backup_type, db_name]) + db_name = row["db_name"] if len(row["db_name"]) > 0 else "*" + backup_dict["mysql_list"].append([server_name, backup_name, backup_type, db_name]) # if backup_type == "sqlserver+ssh": # db_name = row['db_name'] # 
backup_dict['sqlserver_list'].append([server_name, backup_name, backup_type, db_name]) @@ -250,49 +240,68 @@ def read_config(): # db_name = row['db_name'] # backup_dict['oracle_list'].append([server_name, backup_name, backup_type, db_name]) if backup_type == "xen-xva": - backup_dict['xva_list'].append([server_name, backup_name, backup_type, ""]) + backup_dict["xva_list"].append([server_name, backup_name, backup_type, ""]) # if backup_type == "switch": # backup_dict['switch_list'].append([server_name, backup_name, backup_type, ""]) return backup_dict -@app.route('/') + +@app.route("/") def backup_all(): backup_dict = read_config() - return render_template('backups.html', backup_list = backup_dict) + return render_template("backups.html", backup_list=backup_dict) -@app.route('/config_number/') -@app.route('/config_number/') +@app.route("/config_number/") +@app.route("/config_number/") def set_config_number(id=None): if id is not None and len(CONFIG) > id: global config_number - config_number=id + config_number = id read_config() - return jsonify(configs=CONFIG,config_number=config_number) + return jsonify(configs=CONFIG, config_number=config_number) -@app.route('/all_json') +@app.route("/all_json") def backup_all_json(): backup_dict = read_all_configs(BASE_DIR) - return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']) - #+ backup_dict['switch_list'])+backup_dict['sqlserver_list'] + return json.dumps( + backup_dict["rsync_list"] + + backup_dict["rsync_btrfs_list"] + + backup_dict["rsync_ssh_list"] + + backup_dict["pgsql_list"] + + backup_dict["mysql_list"] + + backup_dict["xva_list"] + + backup_dict["null_list"] + + backup_dict["metadata_list"] + ) + # + backup_dict['switch_list'])+backup_dict['sqlserver_list'] -@app.route('/json') +@app.route("/json") def backup_json(): backup_dict = read_config() - return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']) - #+ backup_dict['switch_list'])+backup_dict['sqlserver_list'] + return json.dumps( + backup_dict["rsync_list"] + + backup_dict["rsync_btrfs_list"] + + backup_dict["rsync_ssh_list"] + + backup_dict["pgsql_list"] + + backup_dict["mysql_list"] + + backup_dict["xva_list"] + + backup_dict["null_list"] + + backup_dict["metadata_list"] + ) + # + backup_dict['switch_list'])+backup_dict['sqlserver_list'] def check_usb_disk(): """This method returns the mounts point of FIRST external disk""" -# disk_name = [] + # disk_name = [] usb_disk_list = [] - for name in glob.glob('/dev/sd[a-z]'): + for name in glob.glob("/dev/sd[a-z]"): for line in os.popen("udevadm info -q env -n %s" % name): if re.match("ID_PATH=.*usb.*", line): - usb_disk_list += [ name ] + usb_disk_list += [name] if len(usb_disk_list) == 0: raise_error("Cannot find any external usb disk", "You should plug the usb hard drive into the server") @@ -301,20 +310,23 @@ def check_usb_disk(): usb_partition_list = [] for usb_disk in usb_disk_list: - cmd = "udevadm info -q path -n %s" % usb_disk + '1' - output = os.popen(cmd).read() + cmd = "udevadm info -q path -n %s" % usb_disk + "1" + output = os.popen(cmd).read() print("cmd : " + cmd) print("output : " + output) - if '/devices/pci' in output: - #flash("partition found: %s1" % usb_disk) + if 
"/devices/pci" in output: + # flash("partition found: %s1" % usb_disk) usb_partition_list.append(usb_disk + "1") print(usb_partition_list) - if len(usb_partition_list) ==0: - raise_error("The drive %s has no partition" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label") - return "" + if len(usb_partition_list) == 0: + raise_error( + "The drive %s has no partition" % (usb_disk_list[0]), + "You should initialize the usb drive and format an ext4 partition with TISBACKUP label", + ) + return "" tisbackup_partition_list = [] for usb_partition in usb_partition_list: @@ -322,133 +334,139 @@ def check_usb_disk(): flash("tisbackup backup partition found: %s" % usb_partition) tisbackup_partition_list.append(usb_partition) - print(tisbackup_partition_list) + print(tisbackup_partition_list) - if len(tisbackup_partition_list) ==0: - raise_error("No tisbackup partition exist on disk %s" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label") + if len(tisbackup_partition_list) == 0: + raise_error( + "No tisbackup partition exist on disk %s" % (usb_disk_list[0]), + "You should initialize the usb drive and format an ext4 partition with TISBACKUP label", + ) return "" - if len(tisbackup_partition_list) > 1: + if len(tisbackup_partition_list) > 1: raise_error("There are many usb disk", "You should plug remove one of them") return "" - return tisbackup_partition_list[0] -def check_already_mount(partition_name,refresh): - with open('/proc/mounts') as f: +def check_already_mount(partition_name, refresh): + with open("/proc/mounts") as f: mount_point = "" for line in f.readlines(): if line.startswith(partition_name): - mount_point = line.split(' ')[1] + mount_point = line.split(" ")[1] if not refresh: run_command("/bin/umount %s" % mount_point) - os.rmdir(mount_point) + os.rmdir(mount_point) return mount_point + def run_command(cmd, info=""): - flash("Executing: %s"% cmd) - from subprocess import CalledProcessError, check_output - result ="" + flash("Executing: %s" % cmd) + from subprocess import CalledProcessError, check_output + + result = "" try: - result = check_output(cmd, stderr=subprocess.STDOUT,shell=True) + result = check_output(cmd, stderr=subprocess.STDOUT, shell=True) except CalledProcessError: - raise_error(result,info) + raise_error(result, info) return result -def check_mount_disk(partition_name, refresh): - - mount_point = check_already_mount(partition_name, refresh) - if not refresh: - - mount_point = "/mnt/TISBACKUP-" +str(time.time()) +def check_mount_disk(partition_name, refresh): + + mount_point = check_already_mount(partition_name, refresh) + if not refresh: + + mount_point = "/mnt/TISBACKUP-" + str(time.time()) os.mkdir(mount_point) - flash("must mount " + partition_name ) + flash("must mount " + partition_name) cmd = "mount %s %s" % (partition_name, mount_point) - if run_command(cmd,"You should manualy mount the usb drive") != "": + if run_command(cmd, "You should manualy mount the usb drive") != "": flash("Remove directory: %s" % mount_point) - os.rmdir(mount_point) - return "" + os.rmdir(mount_point) + return "" return mount_point -@app.route('/status.json') + +@app.route("/status.json") def export_backup_status(): exports = dbstat.query('select * from stats where TYPE="EXPORT" and backup_start>="%s"' % mindate) error = "" - finish=not runnings_backups() - if get_task() is not None and finish: + finish = not runnings_backups() + if get_task() is not None and finish: status 
= get_task().get() if status != "ok": - error = "Export failing with error: "+status + error = "Export failing with error: " + status + + return jsonify(data=exports, finish=finish, error=error) - - return jsonify(data=exports,finish=finish,error=error) def runnings_backups(): - task = get_task() - is_runnig = (task is not None) - finish = ( is_runnig and task.get() is not None) + task = get_task() + is_runnig = task is not None + finish = is_runnig and task.get() is not None return is_runnig and not finish -@app.route('/backups.json') +@app.route("/backups.json") def last_backup_json(): exports = dbstat.query('select * from stats where TYPE="BACKUP" ORDER BY backup_start DESC ') - return Response(response=json.dumps(exports), - status=200, - mimetype="application/json") + return Response(response=json.dumps(exports), status=200, mimetype="application/json") -@app.route('/last_backups') +@app.route("/last_backups") def last_backup(): exports = dbstat.query('select * from stats where TYPE="BACKUP" ORDER BY backup_start DESC LIMIT 20 ') return render_template("last_backups.html", backups=exports) -@app.route('/export_backup') +@app.route("/export_backup") def export_backup(): - + raise_error("", "") backup_dict = read_config() sections = [] backup_sections = [] - for backup_types in backup_dict: + for backup_types in backup_dict: if backup_types == "null_list": continue for section in backup_dict[backup_types]: - #if section.count > 0: + # if section.count > 0: if len(section) > 0: sections.append(section[1]) - noJobs = (not runnings_backups()) + noJobs = not runnings_backups() if "start" in list(request.args.keys()) or not noJobs: - start=True + start = True if "sections" in list(request.args.keys()): - backup_sections = request.args.getlist('sections') - + backup_sections = request.args.getlist("sections") else: - start=False + start = False cp.read(tisbackup_config_file) partition_name = check_usb_disk() if partition_name: if noJobs: - mount_point = check_mount_disk( partition_name, False) + mount_point = check_mount_disk(partition_name, False) else: - mount_point = check_mount_disk( partition_name, True) + mount_point = check_mount_disk(partition_name, True) if noJobs: - global mindate - mindate = datetime2isodate(datetime.datetime.now()) + global mindate + mindate = datetime2isodate(datetime.datetime.now()) if not error and start: print(tisbackup_config_file) - task = run_export_backup(base=backup_base_dir, config_file=CONFIG[config_number], mount_point=mount_point, backup_sections=",".join([str(x) for x in backup_sections])) + task = run_export_backup( + base=backup_base_dir, + config_file=CONFIG[config_number], + mount_point=mount_point, + backup_sections=",".join([str(x) for x in backup_sections]), + ) set_task(task) - - + return render_template("export_backup.html", error=error, start=start, info=info, email=ADMIN_EMAIL, sections=sections) @@ -458,9 +476,10 @@ def raise_error(strError, strInfo): info = strInfo -if __name__ == "__main__": +if __name__ == "__main__": read_config() from os import environ - if 'WINGDB_ACTIVE' in environ: + + if "WINGDB_ACTIVE" in environ: app.debug = False - app.run(host= '0.0.0.0',port=8080) + app.run(host="0.0.0.0", port=8080)
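
To reproduce the two lint gates touched by this change locally, a minimal sketch; hadolint is assumed to be installed separately from its upstream package, and nothing in this diff pins tool versions:

    pip install ruff
    ruff check .                                  # the only ruff step the trimmed workflow still runs
    hadolint --config .hadolint.yml Dockerfile    # lint the Dockerfile against the new .hadolint.yml

The hadolint call passes the config explicitly through --config rather than relying on file-name auto-discovery of .hadolint.yaml.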