A few fixes and lint compatibility
parent 8479c378ee
commit e7e98d0b47
@@ -19,7 +19,6 @@ jobs:
       - run: pip install ruff
       - run: |
           ruff check .
-          ruff fix .
       # - uses: stefanzweifel/git-auto-commit-action@v4
       #   with:
       #     commit_message: 'style fixes by ruff'
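Aside: "ruff fix" was never a ruff subcommand, which is presumably why that step is dropped; the supported spelling of the fixer is "ruff check --fix". A hedged local equivalent of the two CI steps (a sketch, not part of the commit):

# Run the same lint pass locally; assumes ruff is installed (pip install ruff).
import subprocess

subprocess.run(["ruff", "check", "."], check=True)           # report lint findings
subprocess.run(["ruff", "check", "--fix", "."], check=True)  # apply safe autofixes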
.hadolint.yml | 13 (new file)
@@ -0,0 +1,13 @@
+failure-threshold: warning
+format: tty
+ignored:
+  - DL3007
+override:
+  error:
+    - DL3015
+  warning:
+    - DL3015
+  info:
+    - DL3008
+  style:
+    - DL3015
Dockerfile | 12
@@ -5,13 +5,11 @@ WORKDIR /opt/tisbackup
 COPY entrypoint.sh /entrypoint.sh
 COPY . /opt/tisbackup
 
-RUN apt update \
-    && apt install --no-install-recommends -y rsync ssh cron \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN /usr/local/bin/python3.12 -m pip install --no-cache-dir -r requirements.txt
-
-RUN mkdir -p /var/spool/cron/crontabs \
+RUN apt-get update \
+    && apt-get install --no-install-recommends -y rsync ssh cron \
+    && rm -rf /var/lib/apt/lists/* \
+    && /usr/local/bin/python3.12 -m pip install --no-cache-dir -r requirements.txt \
+    && mkdir -p /var/spool/cron/crontabs \
     && echo '59 03 * * * root /bin/bash /opt/tisbackup/backup.sh' > /etc/crontab \
     && echo '' >> /etc/crontab \
     && crontab /etc/crontab
tisbackup.py | 196
@@ -23,8 +23,8 @@ import sys
 from os.path import isfile, join
 
 tisbackup_root_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.insert(0,os.path.join(tisbackup_root_dir,'lib'))
-sys.path.insert(0,os.path.join(tisbackup_root_dir,'libtisbackup'))
+sys.path.insert(0, os.path.join(tisbackup_root_dir, "lib"))
+sys.path.insert(0, os.path.join(tisbackup_root_dir, "libtisbackup"))
 
 import errno
 import logging
@@ -35,14 +35,16 @@ from optparse import OptionParser
 from iniparse import ConfigParser, ini
 
 from libtisbackup.backup_mysql import backup_mysql
 
 # from libtisbackup.backup_vmdk import backup_vmdk
 # from libtisbackup.backup_switch import backup_switch
 from libtisbackup.backup_null import backup_null
 from libtisbackup.backup_pgsql import backup_pgsql
 from libtisbackup.backup_rsync import backup_rsync, backup_rsync_ssh
 
 # from libtisbackup.backup_oracle import backup_oracle
-from libtisbackup.backup_rsync_btrfs import (backup_rsync__btrfs_ssh,
-                                             backup_rsync_btrfs)
+from libtisbackup.backup_rsync_btrfs import backup_rsync__btrfs_ssh, backup_rsync_btrfs
 
 # from libtisbackup.backup_sqlserver import backup_sqlserver
 from libtisbackup.backup_samba4 import backup_samba4
 from libtisbackup.backup_xcp_metadata import backup_xcp_metadata
@@ -70,23 +72,45 @@ action is either :
 version = "VERSION"
 
 parser = OptionParser(usage=usage, version="%prog " + version)
-parser.add_option("-c","--config", dest="config", default='/etc/tis/tisbackup-config.ini', help="Config file full path (default: %default)")
-parser.add_option("-d","--dry-run", dest="dry_run", default=False, action='store_true', help="Dry run (default: %default)")
-parser.add_option("-v","--verbose", dest="verbose", default=False, action='store_true', help="More information (default: %default)")
-parser.add_option("-s","--sections", dest="sections", default='', help="Comma separated list of sections (backups) to process (default: All)")
-parser.add_option("-l","--loglevel", dest="loglevel", default='info', type='choice', choices=['debug','warning','info','error','critical'], metavar='LOGLEVEL',help="Loglevel (default: %default)")
-parser.add_option("-n","--len", dest="statscount", default=30, type='int', help="Number of lines to list for dumpstat (default: %default)")
-parser.add_option("-b","--backupdir", dest="backup_base_dir", default='', help="Base directory for all backups (default: [global] backup_base_dir in config file)")
-parser.add_option("-x","--exportdir", dest="exportdir", default='', help="Directory where to export latest backups with exportbackup (nodefault)")
+parser.add_option(
+    "-c", "--config", dest="config", default="/etc/tis/tisbackup-config.ini", help="Config file full path (default: %default)"
+)
+parser.add_option("-d", "--dry-run", dest="dry_run", default=False, action="store_true", help="Dry run (default: %default)")
+parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="More information (default: %default)")
+parser.add_option(
+    "-s", "--sections", dest="sections", default="", help="Comma separated list of sections (backups) to process (default: All)"
+)
+parser.add_option(
+    "-l",
+    "--loglevel",
+    dest="loglevel",
+    default="info",
+    type="choice",
+    choices=["debug", "warning", "info", "error", "critical"],
+    metavar="LOGLEVEL",
+    help="Loglevel (default: %default)",
+)
+parser.add_option("-n", "--len", dest="statscount", default=30, type="int", help="Number of lines to list for dumpstat (default: %default)")
+parser.add_option(
+    "-b",
+    "--backupdir",
+    dest="backup_base_dir",
+    default="",
+    help="Base directory for all backups (default: [global] backup_base_dir in config file)",
+)
+parser.add_option(
+    "-x", "--exportdir", dest="exportdir", default="", help="Directory where to export latest backups with exportbackup (no default)"
+)
 
+
 class tis_backup:
-    logger = logging.getLogger('tisbackup')
+    logger = logging.getLogger("tisbackup")
 
-    def __init__(self,dry_run=False,verbose=False,backup_base_dir=''):
+    def __init__(self, dry_run=False, verbose=False, backup_base_dir=""):
         self.dry_run = dry_run
         self.verbose = verbose
         self.backup_base_dir = backup_base_dir
-        self.backup_base_dir = ''
+        self.backup_base_dir = ""
         self.backup_list = []
         self.dry_run = dry_run
         self.verbose = False
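A side note on the %default placeholders kept in every help string above: optparse substitutes the option's default= value when rendering --help. A minimal, self-contained illustration (not tisbackup code):

# optparse expands %default in help strings from the default= argument.
from optparse import OptionParser

p = OptionParser()
p.add_option("-l", "--loglevel", dest="loglevel", default="info",
             type="choice", choices=["debug", "warning", "info", "error", "critical"],
             help="Loglevel (default: %default)")
opts, _ = p.parse_args(["-l", "debug"])
print(opts.loglevel)  # -> debug; --help would show "(default: info)"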
@@ -97,22 +121,23 @@ class tis_backup:
         cp.read(filename)
 
         if not self.backup_base_dir:
-            self.backup_base_dir = cp.get('global','backup_base_dir')
+            self.backup_base_dir = cp.get("global", "backup_base_dir")
         if not os.path.isdir(self.backup_base_dir):
-            self.logger.info('Creating backup directory %s' % self.backup_base_dir)
+            self.logger.info("Creating backup directory %s" % self.backup_base_dir)
             os.makedirs(self.backup_base_dir)
 
         self.logger.debug("backup directory : " + self.backup_base_dir)
-        self.dbstat = BackupStat(os.path.join(self.backup_base_dir,'log','tisbackup.sqlite'))
+        self.dbstat = BackupStat(os.path.join(self.backup_base_dir, "log", "tisbackup.sqlite"))
 
         for section in cp.sections():
-            if (section != 'global'):
+            if section != "global":
                 self.logger.debug("reading backup config " + section)
                 backup_item = None
-                type = cp.get(section,'type')
+                type = cp.get(section, "type")
 
-                backup_item = backup_drivers[type](backup_name=section,
-                        backup_dir=os.path.join(self.backup_base_dir,section),dbstat=self.dbstat,dry_run=self.dry_run)
+                backup_item = backup_drivers[type](
+                    backup_name=section, backup_dir=os.path.join(self.backup_base_dir, section), dbstat=self.dbstat, dry_run=self.dry_run
+                )
                 backup_item.read_config(cp)
                 backup_item.verbose = self.verbose
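For context on backup_drivers[type]: libtisbackup is assumed to keep a registry mapping each ini section's "type" value to a driver class, roughly along these lines (a sketch, not the actual libtisbackup code):

# Hypothetical registry sketch; names are illustrative only.
backup_drivers = {}

def register_driver(driverclass):
    backup_drivers[driverclass.type] = driverclass

class backup_demo:  # stand-in driver for illustration
    type = "demo"

    def __init__(self, backup_name, backup_dir, dbstat=None, dry_run=False):
        self.backup_name = backup_name
        self.backup_dir = backup_dir

register_driver(backup_demo)
item = backup_drivers["demo"](backup_name="demo1", backup_dir="/tmp/demo1")
print(item.backup_name)  # -> demo1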
@@ -122,20 +147,19 @@ class tis_backup:
     # TODO socket.gethostbyaddr('64.236.16.20')
     # TODO limit backup to one backup on the command line
 
-
     def checknagios(self, sections=[]):
         try:
             if not sections:
                 sections = [backup_item.backup_name for backup_item in self.backup_list]
 
-            self.logger.debug('Start of check nagios for %s' % (','.join(sections),))
+            self.logger.debug("Start of check nagios for %s" % (",".join(sections),))
             try:
                 worst_nagiosstatus = None
                 ok = []
                 warning = []
                 critical = []
                 unknown = []
-                nagiosoutput = ''
+                nagiosoutput = ""
                 for backup_item in self.backup_list:
                     if not sections or backup_item.backup_name in sections:
                         (nagiosstatus, log) = backup_item.checknagios()
@@ -150,7 +174,7 @@ class tis_backup:
                         self.logger.debug('[%s] nagios:"%i" log: %s', backup_item.backup_name, nagiosstatus, log)
 
                 if not ok and not critical and not unknown and not warning:
-                    self.logger.debug('Nothing processed')
+                    self.logger.debug("Nothing processed")
                     worst_nagiosstatus = nagiosStateUnknown
                     nagiosoutput = 'UNKNOWN : Unknown backup sections "%s"' % sections
 
@@ -159,40 +183,39 @@ class tis_backup:
             if unknown:
                 if not worst_nagiosstatus:
                     worst_nagiosstatus = nagiosStateUnknown
-                    nagiosoutput = 'UNKNOWN status backups %s' % (','.join([b[0] for b in unknown]))
+                    nagiosoutput = "UNKNOWN status backups %s" % (",".join([b[0] for b in unknown]))
                 globallog.extend(unknown)
 
             if critical:
                 if not worst_nagiosstatus:
                     worst_nagiosstatus = nagiosStateCritical
-                    nagiosoutput = 'CRITICAL backups %s' % (','.join([b[0] for b in critical]))
+                    nagiosoutput = "CRITICAL backups %s" % (",".join([b[0] for b in critical]))
                 globallog.extend(critical)
 
             if warning:
                 if not worst_nagiosstatus:
                     worst_nagiosstatus = nagiosStateWarning
-                    nagiosoutput = 'WARNING backups %s' % (','.join([b[0] for b in warning]))
+                    nagiosoutput = "WARNING backups %s" % (",".join([b[0] for b in warning]))
                 globallog.extend(warning)
 
             if ok:
                 if not worst_nagiosstatus:
                     worst_nagiosstatus = nagiosStateOk
-                    nagiosoutput = 'OK backups %s' % (','.join([b[0] for b in ok]))
+                    nagiosoutput = "OK backups %s" % (",".join([b[0] for b in ok]))
                 globallog.extend(ok)
 
             if worst_nagiosstatus == nagiosStateOk:
-                nagiosoutput = 'ALL backups OK %s' % (','.join(sections))
-
+                nagiosoutput = "ALL backups OK %s" % (",".join(sections))
 
         except BaseException as e:
             worst_nagiosstatus = nagiosStateCritical
-            nagiosoutput = 'EXCEPTION',"Critical : %s" % str(e)
+            nagiosoutput = "EXCEPTION", "Critical : %s" % str(e)
             raise
 
         finally:
             self.logger.debug('worst nagios status :"%i"', worst_nagiosstatus)
-            print('%s (tisbackup V%s)' %(nagiosoutput,version))
-            print('\n'.join(["[%s]:%s" % (log_elem[0],log_elem[1]) for log_elem in globallog]))
+            print("%s (tisbackup V%s)" % (nagiosoutput, version))
+            print("\n".join(["[%s]:%s" % (log_elem[0], log_elem[1]) for log_elem in globallog]))
             sys.exit(worst_nagiosstatus)
 
     def process_backup(self, sections=[]):
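checknagios() folds the per-section results into a single worst status and exits with it. The nagiosState* constants imported from libtisbackup are assumed to follow the standard Nagios plugin exit codes:

# Standard Nagios plugin exit codes (assumed values of the constants above).
nagiosStateOk = 0
nagiosStateWarning = 1
nagiosStateCritical = 2
nagiosStateUnknown = 3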
@@ -201,50 +224,50 @@ class tis_backup:
         if not sections:
             sections = [backup_item.backup_name for backup_item in self.backup_list]
 
-        self.logger.info('Processing backup for %s' % (','.join(sections)) )
+        self.logger.info("Processing backup for %s" % (",".join(sections)))
         for backup_item in self.backup_list:
             if not sections or backup_item.backup_name in sections:
                 try:
-                    assert(isinstance(backup_item,backup_generic))
-                    self.logger.info('Processing [%s]',(backup_item.backup_name))
+                    assert isinstance(backup_item, backup_generic)
+                    self.logger.info("Processing [%s]", (backup_item.backup_name))
                     stats = backup_item.process_backup()
                     processed.append((backup_item.backup_name, stats))
                 except BaseException as e:
-                    self.logger.critical('Backup [%s] processed with error : %s',backup_item.backup_name,e)
+                    self.logger.critical("Backup [%s] processed with error : %s", backup_item.backup_name, e)
                     errors.append((backup_item.backup_name, str(e)))
         if not processed and not errors:
-            self.logger.critical('No backup properly finished or processed')
+            self.logger.critical("No backup properly finished or processed")
         else:
             if processed:
-                self.logger.info('Backup processed : %s' , ",".join([b[0] for b in processed]))
+                self.logger.info("Backup processed : %s", ",".join([b[0] for b in processed]))
             if errors:
-                self.logger.error('Backup processed with errors: %s' , ",".join([b[0] for b in errors]))
+                self.logger.error("Backup processed with errors: %s", ",".join([b[0] for b in errors]))
 
-    def export_backups(self,sections=[],exportdir=''):
+    def export_backups(self, sections=[], exportdir=""):
         processed = []
         errors = []
         if not sections:
             sections = [backup_item.backup_name for backup_item in self.backup_list]
 
-        self.logger.info('Exporting OK backups for %s to %s' % (','.join(sections),exportdir) )
+        self.logger.info("Exporting OK backups for %s to %s" % (",".join(sections), exportdir))
 
         for backup_item in self.backup_list:
             if backup_item.backup_name in sections:
                 try:
-                    assert(isinstance(backup_item,backup_generic))
-                    self.logger.info('Processing [%s]',(backup_item.backup_name))
+                    assert isinstance(backup_item, backup_generic)
+                    self.logger.info("Processing [%s]", (backup_item.backup_name))
                     stats = backup_item.export_latestbackup(destdir=exportdir)
                     processed.append((backup_item.backup_name, stats))
                 except BaseException as e:
-                    self.logger.critical('Export Backup [%s] processed with error : %s',backup_item.backup_name,e)
+                    self.logger.critical("Export Backup [%s] processed with error : %s", backup_item.backup_name, e)
                     errors.append((backup_item.backup_name, str(e)))
         if not processed and not errors:
-            self.logger.critical('No export backup properly finished or processed')
+            self.logger.critical("No export backup properly finished or processed")
         else:
             if processed:
-                self.logger.info('Export Backups processed : %s' , ",".join([b[0] for b in processed]))
+                self.logger.info("Export Backups processed : %s", ",".join([b[0] for b in processed]))
             if errors:
-                self.logger.error('Export Backups processed with errors: %s' , ",".join([b[0] for b in errors]))
+                self.logger.error("Export Backups processed with errors: %s", ",".join([b[0] for b in errors]))
 
     def retry_failed_backups(self, maxage_hours=30):
         processed = []
@@ -252,62 +275,62 @@ class tis_backup:
 
         # before mindate, backup is too old
         mindate = datetime2isodate((datetime.datetime.now() - datetime.timedelta(hours=maxage_hours)))
-        failed_backups = self.dbstat.query("""\
+        failed_backups = self.dbstat.query(
+            """\
                select distinct backup_name as bname
                from stats
-               where status="OK" and backup_start>=?""",(mindate,))
-
+               where status="OK" and backup_start>=?""",
+            (mindate,),
+        )
 
         defined_backups = list(map(lambda f: f.backup_name, [x for x in self.backup_list if not isinstance(x, backup_null)]))
-        failed_backups_names = set(defined_backups) - set([b['bname'] for b in failed_backups if b['bname'] in defined_backups])
-
+        failed_backups_names = set(defined_backups) - set([b["bname"] for b in failed_backups if b["bname"] in defined_backups])
 
         if failed_backups_names:
-            self.logger.info('Processing backup for %s',','.join(failed_backups_names))
+            self.logger.info("Processing backup for %s", ",".join(failed_backups_names))
             for backup_item in self.backup_list:
                 if backup_item.backup_name in failed_backups_names:
                     try:
-                        assert(isinstance(backup_item,backup_generic))
-                        self.logger.info('Processing [%s]',(backup_item.backup_name))
+                        assert isinstance(backup_item, backup_generic)
+                        self.logger.info("Processing [%s]", (backup_item.backup_name))
                         stats = backup_item.process_backup()
                         processed.append((backup_item.backup_name, stats))
                     except BaseException as e:
-                        self.logger.critical('Backup [%s] not processed, error : %s',backup_item.backup_name,e)
+                        self.logger.critical("Backup [%s] not processed, error : %s", backup_item.backup_name, e)
                         errors.append((backup_item.backup_name, str(e)))
             if not processed and not errors:
-                self.logger.critical('No backup properly finished or processed')
+                self.logger.critical("No backup properly finished or processed")
             else:
                 if processed:
-                    self.logger.info('Backup processed : %s' , ",".join([b[0] for b in errors]))
+                    self.logger.info("Backup processed : %s", ",".join([b[0] for b in processed]))
                 if errors:
-                    self.logger.error('Backup processed with errors: %s' , ",".join([b[0] for b in errors]))
+                    self.logger.error("Backup processed with errors: %s", ",".join([b[0] for b in errors]))
         else:
-            self.logger.info('No recent failed backups found in database')
-
+            self.logger.info("No recent failed backups found in database")
 
     def cleanup_backup_section(self, sections=[]):
         processed = False
         if not sections:
             sections = [backup_item.backup_name for backup_item in self.backup_list]
 
-        self.logger.info('Processing cleanup for %s' % (','.join(sections)) )
+        self.logger.info("Processing cleanup for %s" % (",".join(sections)))
         for backup_item in self.backup_list:
             if backup_item.backup_name in sections:
                 try:
-                    assert(isinstance(backup_item,backup_generic))
-                    self.logger.info('Processing cleanup of [%s]',(backup_item.backup_name))
+                    assert isinstance(backup_item, backup_generic)
+                    self.logger.info("Processing cleanup of [%s]", (backup_item.backup_name))
                     backup_item.cleanup_backup()
                     processed = True
                 except BaseException as e:
-                    self.logger.critical('Cleanup of [%s] not processed, error : %s',backup_item.backup_name,e)
+                    self.logger.critical("Cleanup of [%s] not processed, error : %s", backup_item.backup_name, e)
         if not processed:
-            self.logger.critical('No cleanup properly finished or processed')
+            self.logger.critical("No cleanup properly finished or processed")
 
     def register_existingbackups(self, sections=[]):
         if not sections:
             sections = [backup_item.backup_name for backup_item in self.backup_list]
 
-        self.logger.info('Append existing backups to database...')
+        self.logger.info("Append existing backups to database...")
         for backup_item in self.backup_list:
             if backup_item.backup_name in sections:
                 backup_item.register_existingbackups()
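The retry logic keys on mindate: any defined section without an "OK" run since then gets retried. datetime2isodate comes from libtisbackup; a hypothetical stand-in showing why a plain string compare against backup_start works (ISO 8601 timestamps sort lexicographically):

# Hypothetical stand-in for libtisbackup's datetime2isodate; illustration only.
import datetime

def datetime2isodate(dt):
    return dt.isoformat()

mindate = datetime2isodate(datetime.datetime.now() - datetime.timedelta(hours=30))
print(mindate)  # e.g. "2024-05-01T03:59:00.123456"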
@@ -315,11 +338,11 @@ class tis_backup:
     def html_report(self):
         for backup_item in self.backup_list:
             if not section or section == backup_item.backup_name:
-                assert(isinstance(backup_item,backup_generic))
+                assert isinstance(backup_item, backup_generic)
                 if not maxage_hours:
                     maxage_hours = backup_item.maximum_backup_age
                 (nagiosstatus, log) = backup_item.checknagios(maxage_hours=maxage_hours)
-                globallog.append('[%s] %s' % (backup_item.backup_name,log))
+                globallog.append("[%s] %s" % (backup_item.backup_name, log))
                 self.logger.debug('[%s] nagios:"%i" log: %s', backup_item.backup_name, nagiosstatus, log)
                 # processed = True
                 # if nagiosstatus >= worst_nagiosstatus:
@@ -334,7 +357,7 @@ def main():
         parser.print_usage()
         sys.exit(2)
 
-    backup_start_date = datetime.datetime.now().strftime('%Y%m%d-%Hh%Mm%S')
+    backup_start_date = datetime.datetime.now().strftime("%Y%m%d-%Hh%Mm%S")
 
     # options
     action = args[0]
@@ -350,16 +373,16 @@ def main():
     loglevel = options.loglevel
 
     # setup Logger
-    logger = logging.getLogger('tisbackup')
+    logger = logging.getLogger("tisbackup")
     hdlr = logging.StreamHandler()
-    hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+    hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
     logger.addHandler(hdlr)
 
     # set loglevel
-    if loglevel in ('debug','warning','info','error','critical'):
+    if loglevel in ("debug", "warning", "info", "error", "critical"):
         numeric_level = getattr(logging, loglevel.upper(), None)
         if not isinstance(numeric_level, int):
-            raise ValueError('Invalid log level: %s' % loglevel)
+            raise ValueError("Invalid log level: %s" % loglevel)
         logger.setLevel(numeric_level)
 
     # Config file
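The loglevel lookup above relies on the logging module exposing its levels as upper-case integer attributes; a quick standalone check:

# getattr(logging, "DEBUG") is 10; unknown names fall back to None here.
import logging

numeric_level = getattr(logging, "debug".upper(), None)
assert numeric_level == logging.DEBUG and isinstance(numeric_level, int)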
@@ -370,19 +393,19 @@ def main():
     cp = ConfigParser()
     cp.read(config_file)
 
-    backup_base_dir = options.backup_base_dir or cp.get('global','backup_base_dir')
-    log_dir = os.path.join(backup_base_dir,'log')
+    backup_base_dir = options.backup_base_dir or cp.get("global", "backup_base_dir")
+    log_dir = os.path.join(backup_base_dir, "log")
     if not os.path.exists(log_dir):
         os.makedirs(log_dir)
 
     # if we run the nagios check, we don't create log file, everything is piped to stdout
-    if action!='checknagios':
+    if action != "checknagios":
         try:
-            hdlr = logging.FileHandler(os.path.join(log_dir,'tisbackup_%s.log' % (backup_start_date)))
-            hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+            hdlr = logging.FileHandler(os.path.join(log_dir, "tisbackup_%s.log" % (backup_start_date)))
+            hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
             logger.addHandler(hdlr)
         except IOError as e:
-            if action == 'cleanup' and e.errno == errno.ENOSPC:
+            if action == "cleanup" and e.errno == errno.ENOSPC:
                 logger.warning("No space left on device, disabling file logging.")
             else:
                 raise e
@@ -391,7 +414,7 @@ def main():
     backup = tis_backup(dry_run=dry_run, verbose=verbose, backup_base_dir=backup_base_dir)
     backup.read_ini_file(config_file)
 
-    backup_sections = options.sections.split(',') if options.sections else []
+    backup_sections = options.sections.split(",") if options.sections else []
 
     all_sections = [backup_item.backup_name for backup_item in backup.backup_list]
     if not backup_sections:
@@ -399,7 +422,7 @@ def main():
     else:
         for b in backup_sections:
             if b not in all_sections:
-                raise Exception('Section %s is not defined in config file' % b)
+                raise Exception("Section %s is not defined in config file" % b)
 
     if dry_run:
         logger.warning("WARNING : DRY RUN, nothing will be done, just printing on screen...")
@@ -408,7 +431,7 @@ def main():
         backup.process_backup(backup_sections)
     elif action == "exportbackup":
         if not options.exportdir:
-            raise Exception('No export directory supplied dor exportbackup action')
+            raise Exception("No export directory supplied for exportbackup action")
         backup.export_backups(backup_sections, options.exportdir)
     elif action == "cleanup":
         backup.cleanup_backup_section(backup_sections)
@@ -422,7 +445,6 @@ def main():
     elif action == "register_existing":
         backup.register_existingbackups(backup_sections)
 
-
     else:
         logger.error('Unhandled action "%s", quitting...', action)
         sys.exit(1)
tisbackup_gui.py | 235
@@ -22,8 +22,8 @@ import sys
 from os.path import isfile, join
 
 tisbackup_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
-sys.path.append(os.path.join(tisbackup_root_dir,'lib'))
-sys.path.append(os.path.join(tisbackup_root_dir,'libtisbackup'))
+sys.path.append(os.path.join(tisbackup_root_dir, "lib"))
+sys.path.append(os.path.join(tisbackup_root_dir, "libtisbackup"))
 
 
 import glob
@@ -34,9 +34,7 @@ import time
 from shutil import *
 from urllib.parse import urlparse
 
-from flask import (Flask, Response, abort, appcontext_pushed, flash, g,
-                   jsonify, redirect, render_template, request, session,
-                   url_for)
+from flask import Flask, Response, abort, appcontext_pushed, flash, g, jsonify, redirect, render_template, request, session, url_for
 from iniparse import ConfigParser, RawConfigParser
 
 from config import huey
@@ -47,24 +45,24 @@ from tisbackup import tis_backup
 cp = ConfigParser()
 cp.read("/etc/tis/tisbackup_gui.ini")
 
-CONFIG = cp.get('general','config_tisbackup').split(",")
-SECTIONS = cp.get('general','sections')
-ADMIN_EMAIL = cp.get('general','ADMIN_EMAIL')
-BASE_DIR = cp.get('general','base_config_dir')
+CONFIG = cp.get("general", "config_tisbackup").split(",")
+SECTIONS = cp.get("general", "sections")
+ADMIN_EMAIL = cp.get("general", "ADMIN_EMAIL")
+BASE_DIR = cp.get("general", "base_config_dir")
 
 tisbackup_config_file = CONFIG[0]
 config_number = 0
 
 cp = ConfigParser()
 cp.read(tisbackup_config_file)
-backup_base_dir = cp.get('global','backup_base_dir')
-dbstat = BackupStat(os.path.join(backup_base_dir,'log','tisbackup.sqlite'))
+backup_base_dir = cp.get("global", "backup_base_dir")
+dbstat = BackupStat(os.path.join(backup_base_dir, "log", "tisbackup.sqlite"))
 mindate = None
 error = None
 info = None
 app = Flask(__name__)
-app.secret_key = 'fsiqefiuqsefARZ4Zfesfe34234dfzefzfe'
-app.config['PROPAGATE_EXCEPTIONS'] = True
+app.secret_key = "fsiqefiuqsefARZ4Zfesfe34234dfzefzfe"
+app.config["PROPAGATE_EXCEPTIONS"] = True
 
 tasks_db = os.path.join(tisbackup_root_dir, "tasks.sqlite")
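A hypothetical /etc/tis/tisbackup_gui.ini matching the four cp.get() calls above (keys from the code, values invented):

# Parse an invented sample of the gui config; the stdlib ConfigParser is used
# instead of iniparse purely to keep the sketch self-contained.
from configparser import ConfigParser

sample = """\
[general]
config_tisbackup = /etc/tis/tisbackup-config.ini
sections =
ADMIN_EMAIL = admin@example.com
base_config_dir = /etc/tis
"""
cp = ConfigParser()
cp.read_string(sample)
print(cp.get("general", "config_tisbackup").split(","))  # ['/etc/tis/tisbackup-config.ini']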
@@ -80,19 +78,19 @@ def read_all_configs(base_dir):
 
     for elem in raw_configs:
         line = open(elem).readline()
-        if 'global' in line:
+        if "global" in line:
             list_config.append(elem)
 
     backup_dict = {}
-    backup_dict['rsync_ssh_list'] = []
-    backup_dict['rsync_btrfs_list'] = []
-    backup_dict['rsync_list'] = []
-    backup_dict['null_list'] = []
-    backup_dict['pgsql_list'] = []
-    backup_dict['mysql_list'] = []
+    backup_dict["rsync_ssh_list"] = []
+    backup_dict["rsync_btrfs_list"] = []
+    backup_dict["rsync_list"] = []
+    backup_dict["null_list"] = []
+    backup_dict["pgsql_list"] = []
+    backup_dict["mysql_list"] = []
     # backup_dict['sqlserver_list'] = []
-    backup_dict['xva_list'] = []
-    backup_dict['metadata_list'] = []
+    backup_dict["xva_list"] = []
+    backup_dict["metadata_list"] = []
     # backup_dict['switch_list'] = []
     # backup_dict['oracle_list'] = []
 
@@ -101,7 +99,7 @@ def read_all_configs(base_dir):
     for config_file in list_config:
         cp.read(config_file)
 
-        backup_base_dir = cp.get('global', 'backup_base_dir')
+        backup_base_dir = cp.get("global", "backup_base_dir")
         backup = tis_backup(backup_base_dir=backup_base_dir)
         backup.read_ini_file(config_file)
 
@@ -113,7 +111,7 @@ def read_all_configs(base_dir):
         else:
             for b in backup_sections:
                 if b not in all_sections:
-                    raise Exception('Section %s is not defined in config file' % b)
+                    raise Exception("Section %s is not defined in config file" % b)
 
         # never used..
         # if not backup_sections:
@@ -128,35 +126,28 @@ def read_all_configs(base_dir):
             result.append(b)
 
         for row in result:
-            backup_name = row['backup_name']
-            server_name = row['server_name']
-            backup_type = row['type']
+            backup_name = row["backup_name"]
+            server_name = row["server_name"]
+            backup_type = row["type"]
             if backup_type == "xcp-dump-metadata":
-                backup_dict['metadata_list'].append(
-                    [server_name, backup_name, backup_type, ""])
+                backup_dict["metadata_list"].append([server_name, backup_name, backup_type, ""])
             if backup_type == "rsync+ssh":
-                remote_dir = row['remote_dir']
-                backup_dict['rsync_ssh_list'].append(
-                    [server_name, backup_name, backup_type, remote_dir])
+                remote_dir = row["remote_dir"]
+                backup_dict["rsync_ssh_list"].append([server_name, backup_name, backup_type, remote_dir])
             if backup_type == "rsync+btrfs+ssh":
-                remote_dir = row['remote_dir']
-                backup_dict['rsync_btrfs_list'].append(
-                    [server_name, backup_name, backup_type, remote_dir])
+                remote_dir = row["remote_dir"]
+                backup_dict["rsync_btrfs_list"].append([server_name, backup_name, backup_type, remote_dir])
             if backup_type == "rsync":
-                remote_dir = row['remote_dir']
-                backup_dict['rsync_list'].append(
-                    [server_name, backup_name, backup_type, remote_dir])
+                remote_dir = row["remote_dir"]
+                backup_dict["rsync_list"].append([server_name, backup_name, backup_type, remote_dir])
             if backup_type == "null":
-                backup_dict['null_list'].append(
-                    [server_name, backup_name, backup_type, ""])
+                backup_dict["null_list"].append([server_name, backup_name, backup_type, ""])
             if backup_type == "pgsql+ssh":
-                db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
-                backup_dict['pgsql_list'].append(
-                    [server_name, backup_name, backup_type, db_name])
+                db_name = row["db_name"] if len(row["db_name"]) > 0 else "*"
+                backup_dict["pgsql_list"].append([server_name, backup_name, backup_type, db_name])
             if backup_type == "mysql+ssh":
-                db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
-                backup_dict['mysql_list'].append(
-                    [server_name, backup_name, backup_type, db_name])
+                db_name = row["db_name"] if len(row["db_name"]) > 0 else "*"
+                backup_dict["mysql_list"].append([server_name, backup_name, backup_type, db_name])
             # if backup_type == "sqlserver+ssh":
             #     db_name = row['db_name']
             #     backup_dict['sqlserver_list'].append(
@@ -166,8 +157,7 @@ def read_all_configs(base_dir):
            #     backup_dict['oracle_list'].append(
            #         [server_name, backup_name, backup_type, db_name])
            if backup_type == "xen-xva":
-                backup_dict['xva_list'].append(
-                    [server_name, backup_name, backup_type, ""])
+                backup_dict["xva_list"].append([server_name, backup_name, backup_type, ""])
            # if backup_type == "switch":
            #     backup_dict['switch_list'].append(
            #         [server_name, backup_name, backup_type, ""])
@@ -180,7 +170,7 @@ def read_config():
     cp = ConfigParser()
     cp.read(config_file)
 
-    backup_base_dir = cp.get('global','backup_base_dir')
+    backup_base_dir = cp.get("global", "backup_base_dir")
     backup = tis_backup(backup_base_dir=backup_base_dir)
     backup.read_ini_file(config_file)
 
@@ -192,7 +182,7 @@ def read_config():
     else:
         for b in backup_sections:
             if b not in all_sections:
-                raise Exception('Section %s is not defined in config file' % b)
+                raise Exception("Section %s is not defined in config file" % b)
 
     result = []
 
@@ -209,40 +199,40 @@ def read_config():
         result.append(b)
 
     backup_dict = {}
-    backup_dict['rsync_ssh_list'] = []
-    backup_dict['rsync_btrfs_list'] = []
-    backup_dict['rsync_list'] = []
-    backup_dict['null_list'] = []
-    backup_dict['pgsql_list'] = []
-    backup_dict['mysql_list'] = []
+    backup_dict["rsync_ssh_list"] = []
+    backup_dict["rsync_btrfs_list"] = []
+    backup_dict["rsync_list"] = []
+    backup_dict["null_list"] = []
+    backup_dict["pgsql_list"] = []
+    backup_dict["mysql_list"] = []
     # backup_dict['sqlserver_list'] = []
-    backup_dict['xva_list'] = []
-    backup_dict['metadata_list'] = []
+    backup_dict["xva_list"] = []
+    backup_dict["metadata_list"] = []
     # backup_dict['switch_list'] = []
     # backup_dict['oracle_list'] = []
     for row in result:
-        backup_name = row['backup_name']
-        server_name = row['server_name']
-        backup_type = row['type']
+        backup_name = row["backup_name"]
+        server_name = row["server_name"]
+        backup_type = row["type"]
         if backup_type == "xcp-dump-metadata":
-            backup_dict['metadata_list'].append([server_name, backup_name, backup_type, ""])
+            backup_dict["metadata_list"].append([server_name, backup_name, backup_type, ""])
         if backup_type == "rsync+ssh":
-            remote_dir = row['remote_dir']
-            backup_dict['rsync_ssh_list'].append([server_name, backup_name, backup_type,remote_dir])
+            remote_dir = row["remote_dir"]
+            backup_dict["rsync_ssh_list"].append([server_name, backup_name, backup_type, remote_dir])
         if backup_type == "rsync+btrfs+ssh":
-            remote_dir = row['remote_dir']
-            backup_dict['rsync_btrfs_list'].append([server_name, backup_name, backup_type,remote_dir])
+            remote_dir = row["remote_dir"]
+            backup_dict["rsync_btrfs_list"].append([server_name, backup_name, backup_type, remote_dir])
         if backup_type == "rsync":
-            remote_dir = row['remote_dir']
-            backup_dict['rsync_list'].append([server_name, backup_name, backup_type,remote_dir])
+            remote_dir = row["remote_dir"]
+            backup_dict["rsync_list"].append([server_name, backup_name, backup_type, remote_dir])
         if backup_type == "null":
-            backup_dict['null_list'].append([server_name, backup_name, backup_type, ""])
+            backup_dict["null_list"].append([server_name, backup_name, backup_type, ""])
         if backup_type == "pgsql+ssh":
-            db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
-            backup_dict['pgsql_list'].append([server_name, backup_name, backup_type, db_name])
+            db_name = row["db_name"] if len(row["db_name"]) > 0 else "*"
+            backup_dict["pgsql_list"].append([server_name, backup_name, backup_type, db_name])
         if backup_type == "mysql+ssh":
-            db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
-            backup_dict['mysql_list'].append([server_name, backup_name, backup_type, db_name])
+            db_name = row["db_name"] if len(row["db_name"]) > 0 else "*"
+            backup_dict["mysql_list"].append([server_name, backup_name, backup_type, db_name])
         # if backup_type == "sqlserver+ssh":
         #     db_name = row['db_name']
         #     backup_dict['sqlserver_list'].append([server_name, backup_name, backup_type, db_name])
@@ -250,19 +240,20 @@ def read_config():
        #     db_name = row['db_name']
        #     backup_dict['oracle_list'].append([server_name, backup_name, backup_type, db_name])
        if backup_type == "xen-xva":
-            backup_dict['xva_list'].append([server_name, backup_name, backup_type, ""])
+            backup_dict["xva_list"].append([server_name, backup_name, backup_type, ""])
        # if backup_type == "switch":
        #     backup_dict['switch_list'].append([server_name, backup_name, backup_type, ""])
     return backup_dict
 
+
-@app.route('/')
+@app.route("/")
 def backup_all():
     backup_dict = read_config()
-    return render_template('backups.html', backup_list = backup_dict)
+    return render_template("backups.html", backup_list=backup_dict)
 
 
-@app.route('/config_number/')
-@app.route('/config_number/<int:id>')
+@app.route("/config_number/")
+@app.route("/config_number/<int:id>")
 def set_config_number(id=None):
     if id is not None and len(CONFIG) > id:
         global config_number
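Illustrative shape of the dict read_config() builds (rows invented; every entry is [server_name, backup_name, backup_type, detail]):

# Invented example data; real rows come from the tisbackup ini sections.
backup_dict = {
    "rsync_ssh_list": [["srv1", "srv1_home", "rsync+ssh", "/home"]],
    "rsync_btrfs_list": [],
    "rsync_list": [],
    "null_list": [],
    "pgsql_list": [["srv2", "srv2_db", "pgsql+ssh", "*"]],
    "mysql_list": [],
    "xva_list": [],
    "metadata_list": [],
}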
@@ -271,17 +262,35 @@ def set_config_number(id=None):
     return jsonify(configs=CONFIG, config_number=config_number)
 
 
-@app.route('/all_json')
+@app.route("/all_json")
 def backup_all_json():
     backup_dict = read_all_configs(BASE_DIR)
-    return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list'])
+    return json.dumps(
+        backup_dict["rsync_list"]
+        + backup_dict["rsync_btrfs_list"]
+        + backup_dict["rsync_ssh_list"]
+        + backup_dict["pgsql_list"]
+        + backup_dict["mysql_list"]
+        + backup_dict["xva_list"]
+        + backup_dict["null_list"]
+        + backup_dict["metadata_list"]
+    )
     # + backup_dict['switch_list'])+backup_dict['sqlserver_list']
 
 
-@app.route('/json')
+@app.route("/json")
 def backup_json():
     backup_dict = read_config()
-    return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list'])
+    return json.dumps(
+        backup_dict["rsync_list"]
+        + backup_dict["rsync_btrfs_list"]
+        + backup_dict["rsync_ssh_list"]
+        + backup_dict["pgsql_list"]
+        + backup_dict["mysql_list"]
+        + backup_dict["xva_list"]
+        + backup_dict["null_list"]
+        + backup_dict["metadata_list"]
+    )
     # + backup_dict['switch_list'])+backup_dict['sqlserver_list']
 
@@ -289,7 +298,7 @@ def check_usb_disk():
     """This method returns the mounts point of FIRST external disk"""
     # disk_name = []
     usb_disk_list = []
-    for name in glob.glob('/dev/sd[a-z]'):
+    for name in glob.glob("/dev/sd[a-z]"):
         for line in os.popen("udevadm info -q env -n %s" % name):
             if re.match("ID_PATH=.*usb.*", line):
                 usb_disk_list += [name]
@@ -301,19 +310,22 @@ def check_usb_disk():
 
     usb_partition_list = []
     for usb_disk in usb_disk_list:
-        cmd = "udevadm info -q path -n %s" % usb_disk + '1'
+        cmd = "udevadm info -q path -n %s" % usb_disk + "1"
         output = os.popen(cmd).read()
         print("cmd : " + cmd)
         print("output : " + output)
 
-        if '/devices/pci' in output:
+        if "/devices/pci" in output:
             # flash("partition found: %s1" % usb_disk)
             usb_partition_list.append(usb_disk + "1")
 
     print(usb_partition_list)
 
     if len(usb_partition_list) == 0:
-        raise_error("The drive %s has no partition" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label")
+        raise_error(
+            "The drive %s has no partition" % (usb_disk_list[0]),
+            "You should initialize the usb drive and format an ext4 partition with TISBACKUP label",
+        )
         return ""
 
     tisbackup_partition_list = []
@@ -325,31 +337,35 @@ def check_usb_disk():
     print(tisbackup_partition_list)
 
     if len(tisbackup_partition_list) == 0:
-        raise_error("No tisbackup partition exist on disk %s" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label")
+        raise_error(
+            "No tisbackup partition exist on disk %s" % (usb_disk_list[0]),
+            "You should initialize the usb drive and format an ext4 partition with TISBACKUP label",
+        )
         return ""
 
     if len(tisbackup_partition_list) > 1:
         raise_error("There are many usb disk", "You should plug remove one of them")
         return ""
 
 
     return tisbackup_partition_list[0]
 
 
 def check_already_mount(partition_name, refresh):
-    with open('/proc/mounts') as f:
+    with open("/proc/mounts") as f:
         mount_point = ""
         for line in f.readlines():
             if line.startswith(partition_name):
-                mount_point = line.split(' ')[1]
+                mount_point = line.split(" ")[1]
                 if not refresh:
                     run_command("/bin/umount %s" % mount_point)
                     os.rmdir(mount_point)
     return mount_point
 
 
 def run_command(cmd, info=""):
     flash("Executing: %s" % cmd)
     from subprocess import CalledProcessError, check_output
 
     result = ""
     try:
         result = check_output(cmd, stderr=subprocess.STDOUT, shell=True)
@@ -357,12 +373,12 @@ def run_command(cmd, info=""):
         raise_error(result, info)
     return result
 
 
 def check_mount_disk(partition_name, refresh):
-
     mount_point = check_already_mount(partition_name, refresh)
     if not refresh:
-
-
         mount_point = "/mnt/TISBACKUP-" + str(time.time())
         os.mkdir(mount_point)
         flash("must mount " + partition_name)
@@ -374,7 +390,8 @@ def check_mount_disk(partition_name, refresh):
 
     return mount_point
 
+
-@app.route('/status.json')
+@app.route("/status.json")
 def export_backup_status():
     exports = dbstat.query('select * from stats where TYPE="EXPORT" and backup_start>="%s"' % mindate)
     error = ""
@@ -384,31 +401,29 @@ def export_backup_status():
     if status != "ok":
         error = "Export failing with error: " + status
 
-
     return jsonify(data=exports, finish=finish, error=error)
 
 
 def runnings_backups():
     task = get_task()
-    is_runnig = (task is not None)
-    finish = ( is_runnig and task.get() is not None)
+    is_runnig = task is not None
+    finish = is_runnig and task.get() is not None
     return is_runnig and not finish
 
 
-@app.route('/backups.json')
+@app.route("/backups.json")
 def last_backup_json():
     exports = dbstat.query('select * from stats where TYPE="BACKUP" ORDER BY backup_start DESC ')
-    return Response(response=json.dumps(exports),
-                    status=200,
-                    mimetype="application/json")
+    return Response(response=json.dumps(exports), status=200, mimetype="application/json")
 
 
-@app.route('/last_backups')
+@app.route("/last_backups")
 def last_backup():
     exports = dbstat.query('select * from stats where TYPE="BACKUP" ORDER BY backup_start DESC LIMIT 20 ')
     return render_template("last_backups.html", backups=exports)
 
 
-@app.route('/export_backup')
+@app.route("/export_backup")
 def export_backup():
 
     raise_error("", "")
@@ -423,12 +438,11 @@ def export_backup():
         if len(section) > 0:
             sections.append(section[1])
 
-    noJobs = (not runnings_backups())
+    noJobs = not runnings_backups()
     if "start" in list(request.args.keys()) or not noJobs:
         start = True
         if "sections" in list(request.args.keys()):
-            backup_sections = request.args.getlist('sections')
-
+            backup_sections = request.args.getlist("sections")
 
     else:
         start = False
@@ -445,10 +459,14 @@ def export_backup():
     mindate = datetime2isodate(datetime.datetime.now())
     if not error and start:
         print(tisbackup_config_file)
-        task = run_export_backup(base=backup_base_dir, config_file=CONFIG[config_number], mount_point=mount_point, backup_sections=",".join([str(x) for x in backup_sections]))
+        task = run_export_backup(
+            base=backup_base_dir,
+            config_file=CONFIG[config_number],
+            mount_point=mount_point,
+            backup_sections=",".join([str(x) for x in backup_sections]),
+        )
         set_task(task)
 
     return render_template("export_backup.html", error=error, start=start, info=info, email=ADMIN_EMAIL, sections=sections)
@@ -461,6 +479,7 @@ def raise_error(strError, strInfo):
 if __name__ == "__main__":
     read_config()
     from os import environ
-    if 'WINGDB_ACTIVE' in environ:
+
+    if "WINGDB_ACTIVE" in environ:
         app.debug = False
-    app.run(host= '0.0.0.0',port=8080)
+    app.run(host="0.0.0.0", port=8080)