fix iniparse

fix code passing ruff linter
pre-commit ruff
pre-commit ruff format
k3nny 2024-11-29 22:54:39 +01:00
parent aa8a68aa80
commit 737f9bea38
27 changed files with 2375 additions and 2016 deletions

View File

@@ -1,7 +1,16 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.8.1
hooks:
# Run the linter.
- id: ruff
# Run the formatter.
- id: ruff-format
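
For anyone reproducing this setup, the two ruff hooks can also be exercised directly before committing; a minimal sketch using subprocess (invocation is illustrative, assuming ruff is installed and on PATH):

    import subprocess

    # Mirror the two hooks above: "ruff check" lints, "ruff format --check"
    # verifies formatting without rewriting files.
    for args in (["ruff", "check", "."], ["ruff", "format", "--check", "."]):
        proc = subprocess.run(args, capture_output=True, text=True)
        print(" ".join(args), "->", "clean" if proc.returncode == 0 else "issues")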

View File

@@ -6,4 +6,4 @@ from huey.storage import SqliteStorage
tisbackup_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
tasks_db = os.path.join(tisbackup_root_dir, "tasks.sqlite")
huey = SqlHuey(name="tisbackups",filename=tasks_db,always_eager=False,storage_class=SqliteStorage)
huey = SqlHuey(name="tisbackups", filename=tasks_db, always_eager=False, storage_class=SqliteStorage)
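
With the queue defined this way, tasks would be declared through huey's standard decorator API; a hedged sketch (the task name and body here are hypothetical, not tisbackup's actual tasks):

    @huey.task()
    def run_backup(backup_name):
        # hypothetical body; since always_eager=False above, calling the
        # decorated function enqueues it for a huey consumer to execute
        print("running backup", backup_name)

    run_backup("srvads")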

View File

@@ -30,50 +30,50 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = 'TISBackup'
copyright = '2020, Tranquil IT'
author = 'Tranquil IT'
project = "TISBackup"
copyright = "2020, Tranquil IT"
author = "Tranquil IT"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
version = "1.8"
# The full version, including alpha/beta/rc tags.
release = '1.8.2'
release = "1.8.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
locale_dirs = ['locale/']
language = "en"
locale_dirs = ["locale/"]
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
@@ -110,7 +110,7 @@ exclude_patterns = []
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -126,18 +126,19 @@ todo_include_todos = True
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_favicon = "_static/favicon.ico"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/css/custom.css', # overrides for wide tables in RTD theme
'_static/css/ribbon.css',
'_static/theme_overrides.css', # override wide tables in RTD theme
"css_files": [
"_static/css/custom.css", # overrides for wide tables in RTD theme
"_static/css/ribbon.css",
"_static/theme_overrides.css", # override wide tables in RTD theme
],
}
except ImportError as e:
html_theme = 'alabaster'
except ImportError as e:  # noqa: F841
html_theme = "alabaster"
html_theme_path = []
@@ -178,7 +179,7 @@ except ImportError as e:
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
@@ -258,15 +259,13 @@ html_static_path = ['_static']
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tisbackupdoc'
htmlhelp_basename = "tisbackupdoc"
# -- Linkcheck -------------------
# make linkcheck
# URL patterns to ignore
linkcheck_ignore = [r'http.*://.*mydomain.lan.*',
r'http.*://.*host_fqdn.*',
r'http://user:pwd@host_fqdn:port']
linkcheck_ignore = [r"http.*://.*mydomain.lan.*", r"http.*://.*host_fqdn.*", r"http://user:pwd@host_fqdn:port"]
# -- Options for LaTeX output ---------------------------------------------
@@ -279,23 +278,20 @@ linkcheck_ignore = [r'http.*://.*mydomain.lan.*',
# > \setlength\paperwidth {15.59cm}}
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
'papersize': 'lulupaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '9pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': r'\batchmode',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
'sphinxsetup': 'hmargin={1.5cm,1.5cm}, vmargin={3cm,3cm}, marginpar=1cm',
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
"papersize": "lulupaper",
# The font size ('10pt', '11pt' or '12pt').
#
"pointsize": "9pt",
# Additional stuff for the LaTeX preamble.
#
"preamble": r"\batchmode",
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
"sphinxsetup": "hmargin={1.5cm,1.5cm}, vmargin={3cm,3cm}, marginpar=1cm",
}
@@ -303,7 +299,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tisbackup.tex', 'TISBackup Documentation', 'Tranquil IT', 'manual'),
(master_doc, "tisbackup.tex", "TISBackup Documentation", "Tranquil IT", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -343,10 +339,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tisbackup', 'TISBackup Documentation',
[author], 1)
]
man_pages = [(master_doc, "tisbackup", "TISBackup Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
@@ -359,9 +352,15 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tisbackup', 'TISBackup Documentation',
author, 'Tranquil IT', 'The objective of TISbackup is to benefit from file backups and centralized alert feedback on "reasonable" data volumes.',
'Miscellaneous'),
(
master_doc,
"tisbackup",
"TISBackup Documentation",
author,
"Tranquil IT",
'The objective of TISbackup is to benefit from file backups and centralized alert feedback on "reasonable" data volumes.',
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
@@ -382,7 +381,7 @@ texinfo_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for Epub output ----------------------------------------------
@@ -438,7 +437,7 @@ epub_copyright = copyright
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
#

View File

@@ -61,10 +61,11 @@ import sys
import six.moves.http_client as httplib
import six.moves.xmlrpc_client as xmlrpclib
translation = gettext.translation('xen-xm', fallback = True)
translation = gettext.translation("xen-xm", fallback=True)
API_VERSION_1_1 = "1.1"
API_VERSION_1_2 = "1.2"
API_VERSION_1_1 = '1.1'
API_VERSION_1_2 = '1.2'
class Failure(Exception):
def __init__(self, details):
@@ -79,41 +80,48 @@ class Failure(Exception):
return msg
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
return dict([(str(i), self.details[i]) for i in range(len(self.details))])
# Just a "constant" that we use to decide whether to retry the RPC
_RECONNECT_AND_RETRY = object()
class UDSHTTPConnection(httplib.HTTPConnection):
"""HTTPConnection subclass to allow HTTP over Unix domain sockets. """
"""HTTPConnection subclass to allow HTTP over Unix domain sockets."""
def connect(self):
path = self.host.replace("_", "/")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(path)
class UDSHTTP(httplib.HTTPConnection):
_connection_class = UDSHTTPConnection
class UDSTransport(xmlrpclib.Transport):
def __init__(self, use_datetime=0):
self._use_datetime = use_datetime
self._extra_headers=[]
self._extra_headers = []
self._connection = (None, None)
def add_extra_header(self, key, value):
self._extra_headers += [ (key,value) ]
self._extra_headers += [(key, value)]
def make_connection(self, host):
# Python 2.4 compatibility
if sys.version_info[0] <= 2 and sys.version_info[1] < 7:
return UDSHTTP(host)
else:
return UDSHTTPConnection(host)
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", handler)
for key, value in self._extra_headers:
connection.putheader(key, value)
class Session(xmlrpclib.ServerProxy):
"""A server proxy and session manager for communicating with xapi using
the Xen-API.
@@ -126,32 +134,27 @@ class Session(xmlrpclib.ServerProxy):
session.xenapi.session.logout()
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1, ignore_ssl=False):
def __init__(self, uri, transport=None, encoding=None, verbose=0, allow_none=1, ignore_ssl=False):
# Fix for CA-172901 (+ Python 2.4 compatibility)
# Fix for context=ctx ( < Python 2.7.9 compatibility)
if not (sys.version_info[0] <= 2 and sys.version_info[1] <= 7 and sys.version_info[2] <= 9 ) \
and ignore_ssl:
if not (sys.version_info[0] <= 2 and sys.version_info[1] <= 7 and sys.version_info[2] <= 9) and ignore_ssl:
import ssl
ctx = ssl._create_unverified_context()
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none, context=ctx)
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose, allow_none, context=ctx)
else:
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none)
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose, allow_none)
self.transport = transport
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
if methodname.startswith("login"):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
elif methodname == "logout" or methodname == "session.logout":
self._logout()
return None
else:
@@ -162,29 +165,25 @@ class Session(xmlrpclib.ServerProxy):
if result is _RECONNECT_AND_RETRY:
retry_count += 1
if self.last_login_method:
self._login(self.last_login_method,
self.last_login_params)
self._login(self.last_login_method, self.last_login_params)
else:
raise xmlrpclib.Fault(401, 'You must log in')
raise xmlrpclib.Fault(401, "You must log in")
else:
return result
raise xmlrpclib.Fault(
500, 'Tried 3 times to get a valid session, but failed')
raise xmlrpclib.Fault(500, "Tried 3 times to get a valid session, but failed")
def _login(self, method, params):
try:
result = _parse_result(
getattr(self, 'session.%s' % method)(*params))
result = _parse_result(getattr(self, "session.%s" % method)(*params))
if result is _RECONNECT_AND_RETRY:
raise xmlrpclib.Fault(
500, 'Received SESSION_INVALID when logging in')
raise xmlrpclib.Fault(500, "Received SESSION_INVALID when logging in")
self._session = result
self.last_login_method = method
self.last_login_params = params
self.API_version = self._get_api_version()
except socket.error as e:
if e.errno == socket.errno.ETIMEDOUT:
raise xmlrpclib.Fault(504, 'The connection timed out')
raise xmlrpclib.Fault(504, "The connection timed out")
else:
raise e
@@ -205,41 +204,41 @@ class Session(xmlrpclib.ServerProxy):
host = self.xenapi.pool.get_master(pool)
major = self.xenapi.host.get_API_version_major(host)
minor = self.xenapi.host.get_API_version_minor(host)
return "%s.%s"%(major,minor)
return "%s.%s" % (major, minor)
def __getattr__(self, name):
if name == 'handle':
if name == "handle":
return self._session
elif name == 'xenapi':
elif name == "xenapi":
return _Dispatcher(self.API_version, self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
elif name.startswith("login") or name.startswith("slave_local"):
return lambda *params: self._login(name, params)
elif name == 'logout':
elif name == "logout":
return _Dispatcher(self.API_version, self.xenapi_request, "logout")
else:
return xmlrpclib.ServerProxy.__getattr__(self, name)
def xapi_local():
return Session("http://_var_lib_xcp_xapi/", transport=UDSTransport())
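
As the Session docstring above suggests, a typical lifecycle pairs a login call with session.logout(); a minimal sketch with placeholder host and credentials:

    session = Session("https://xenserver.mydomain.lan", ignore_ssl=True)
    session.xenapi.login_with_password("root", "secret")  # placeholders
    try:
        for vm in session.xenapi.VM.get_all():
            print(session.xenapi.VM.get_name_label(vm))
    finally:
        session.xenapi.session.logout()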
def _parse_result(result):
if type(result) != dict or 'Status' not in result:
raise xmlrpclib.Fault(500, 'Missing Status in response from server' + result)
if result['Status'] == 'Success':
if 'Value' in result:
return result['Value']
if not isinstance(result, dict) or "Status" not in result:
raise xmlrpclib.Fault(500, "Missing Status in response from server" + result)
if result["Status"] == "Success":
if "Value" in result:
return result["Value"]
else:
raise xmlrpclib.Fault(500,
'Missing Value in response from server')
raise xmlrpclib.Fault(500, "Missing Value in response from server")
else:
if 'ErrorDescription' in result:
if result['ErrorDescription'][0] == 'SESSION_INVALID':
if "ErrorDescription" in result:
if result["ErrorDescription"][0] == "SESSION_INVALID":
return _RECONNECT_AND_RETRY
else:
raise Failure(result['ErrorDescription'])
raise Failure(result["ErrorDescription"])
else:
raise xmlrpclib.Fault(
500, 'Missing ErrorDescription in response from server')
raise xmlrpclib.Fault(500, "Missing ErrorDescription in response from server")
# Based upon _Method from xmlrpclib.
@@ -251,9 +250,9 @@ class _Dispatcher:
def __repr__(self):
if self.__name:
return '<XenAPI._Dispatcher for %s>' % self.__name
return "<XenAPI._Dispatcher for %s>" % self.__name
else:
return '<XenAPI._Dispatcher>'
return "<XenAPI._Dispatcher>"
def __getattr__(self, name):
if self.__name is None:

View File

@@ -19,11 +19,10 @@
# -----------------------------------------------------------------------
import sys
try:
sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
sys.stderr = open("/dev/null") # Silence silly warnings from paramiko
import paramiko
except ImportError as e:
print(("Error : can not load paramiko library %s" % e))
@@ -36,19 +35,19 @@ from libtisbackup.common import *
class backup_mysql(backup_generic):
"""Backup a mysql database as gzipped sql file through ssh"""
type = 'mysql+ssh'
required_params = backup_generic.required_params + ['db_user','db_passwd','private_key']
optional_params = backup_generic.optional_params + ['db_name']
db_name=''
db_user=''
db_passwd=''
type = "mysql+ssh"
required_params = backup_generic.required_params + ["db_user", "db_passwd", "private_key"]
optional_params = backup_generic.optional_params + ["db_name"]
db_name = ""
db_user = ""
db_passwd = ""
dest_dir = ""
def do_backup(self,stats):
self.dest_dir = os.path.join(self.backup_dir,self.backup_start_date)
def do_backup(self, stats):
self.dest_dir = os.path.join(self.backup_dir, self.backup_start_date)
if not os.path.isdir(self.dest_dir):
if not self.dry_run:
@@ -56,126 +55,145 @@ class backup_mysql(backup_generic):
else:
print(('mkdir "%s"' % self.dest_dir))
else:
raise Exception('backup destination directory already exists : %s' % self.dest_dir)
raise Exception("backup destination directory already exists : %s" % self.dest_dir)
self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key)
self.logger.debug("[%s] Connecting to %s with user root and key %s", self.backup_name, self.server_name, self.private_key)
try:
mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
except paramiko.SSHException:
#mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
# mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server_name,username='root',pkey = mykey, port=self.ssh_port)
self.ssh.connect(self.server_name, username="root", pkey=mykey, port=self.ssh_port)
self.db_passwd=self.db_passwd.replace('$','\$')
self.db_passwd = self.db_passwd.replace("$", r"\$")
if not self.db_name:
stats['log']= "Successfully backuping processed to the following databases :"
stats['status']='List'
cmd = 'mysql -N -B -p -e "SHOW DATABASES;" -u ' + self.db_user +' -p' + self.db_passwd + ' 2> /dev/null'
self.logger.debug('[%s] List databases: %s',self.backup_name,cmd)
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
stats["log"] = "Successfully backuping processed to the following databases :"
stats["status"] = "List"
cmd = 'mysql -N -B -p -e "SHOW DATABASES;" -u ' + self.db_user + " -p" + self.db_passwd + " 2> /dev/null"
self.logger.debug("[%s] List databases: %s", self.backup_name, cmd)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
databases = output.split('\n')
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
databases = output.split("\n")
for database in databases:
if database != "":
self.db_name = database.rstrip()
self.do_mysqldump(stats)
else:
stats['log']= "Successfully backup processed to the following database :"
stats["log"] = "Successfully backup processed to the following database :"
self.do_mysqldump(stats)
def do_mysqldump(self,stats):
def do_mysqldump(self, stats):
t = datetime.datetime.now()
backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
backup_start_date = t.strftime("%Y%m%d-%Hh%Mm%S")
# dump db
stats['status']='Dumping'
cmd = 'mysqldump --single-transaction -u' + self.db_user +' -p' + self.db_passwd + ' ' + self.db_name + ' > /tmp/' + self.db_name + '-' + backup_start_date + '.sql'
self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
stats["status"] = "Dumping"
cmd = (
"mysqldump --single-transaction -u"
+ self.db_user
+ " -p"
+ self.db_passwd
+ " "
+ self.db_name
+ " > /tmp/"
+ self.db_name
+ "-"
+ backup_start_date
+ ".sql"
)
self.logger.debug("[%s] Dump DB : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
print(output)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
# zip the file
stats['status']='Zipping'
cmd = 'gzip /tmp/' + self.db_name + '-' + backup_start_date + '.sql'
self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd)
stats["status"] = "Zipping"
cmd = "gzip /tmp/" + self.db_name + "-" + backup_start_date + ".sql"
self.logger.debug("[%s] Compress backup : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
# get the file
stats['status']='SFTP'
filepath = '/tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz'
localpath = os.path.join(self.dest_dir , self.db_name + '.sql.gz')
self.logger.debug('[%s] Get gz backup with sftp on %s from %s to %s',self.backup_name,self.server_name,filepath,localpath)
stats["status"] = "SFTP"
filepath = "/tmp/" + self.db_name + "-" + backup_start_date + ".sql.gz"
localpath = os.path.join(self.dest_dir, self.db_name + ".sql.gz")
self.logger.debug("[%s] Get gz backup with sftp on %s from %s to %s", self.backup_name, self.server_name, filepath, localpath)
if not self.dry_run:
transport = self.ssh.get_transport()
transport = self.ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(filepath, localpath)
sftp.close()
if not self.dry_run:
stats['total_files_count']=1 + stats.get('total_files_count', 0)
stats['written_files_count']=1 + stats.get('written_files_count', 0)
stats['total_bytes']=os.stat(localpath).st_size + stats.get('total_bytes', 0)
stats['written_bytes']=os.stat(localpath).st_size + stats.get('written_bytes', 0)
stats['log'] = '%s "%s"' % (stats['log'] ,self.db_name)
stats['backup_location'] = self.dest_dir
stats["total_files_count"] = 1 + stats.get("total_files_count", 0)
stats["written_files_count"] = 1 + stats.get("written_files_count", 0)
stats["total_bytes"] = os.stat(localpath).st_size + stats.get("total_bytes", 0)
stats["written_bytes"] = os.stat(localpath).st_size + stats.get("written_bytes", 0)
stats["log"] = '%s "%s"' % (stats["log"], self.db_name)
stats["backup_location"] = self.dest_dir
stats['status']='RMTemp'
cmd = 'rm -f /tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz'
self.logger.debug('[%s] Remove temp gzip : %s',self.backup_name,cmd)
stats["status"] = "RMTemp"
cmd = "rm -f /tmp/" + self.db_name + "-" + backup_start_date + ".sql.gz"
self.logger.debug("[%s] Remove temp gzip : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
stats['status']='OK'
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
stats["status"] = "OK"
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
register_driver(backup_mysql)
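
register_existingbackups leans on the %Y%m%d-%Hh%Mm%S directory-name convention matched by the regex above; the round trip between name and datetime is easy to verify (the value is illustrative):

    import datetime

    name = "20241129-22h54m39"  # matches ^\d{8}-\d{2}h\d{2}m\d{2}$
    start = datetime.datetime.strptime(name, "%Y%m%d-%Hh%Mm%S")
    assert start.strftime("%Y%m%d-%Hh%Mm%S") == name
    print(start.isoformat())  # 2024-11-29T22:54:39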

View File

@@ -27,25 +27,32 @@ from .common import *
class backup_null(backup_generic):
"""Null backup to register servers which don't need any backups
but we still want to know they are taken into account"""
type = 'null'
required_params = ['type','server_name','backup_name']
type = "null"
required_params = ["type", "server_name", "backup_name"]
optional_params = []
def do_backup(self,stats):
def do_backup(self, stats):
pass
def process_backup(self):
pass
def cleanup_backup(self):
pass
def register_existingbackups(self):
pass
def export_latestbackup(self,destdir):
def export_latestbackup(self, destdir):
return {}
def checknagios(self,maxage_hours=30):
return (nagiosStateOk,"No backups needs to be performed")
def checknagios(self, maxage_hours=30):
return (nagiosStateOk, "No backups needs to be performed")
register_driver(backup_null)
if __name__=='__main__':
if __name__ == "__main__":
pass

View File

@@ -20,7 +20,7 @@
import sys
try:
sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
sys.stderr = open("/dev/null") # Silence silly warnings from paramiko
import paramiko
except ImportError as e:
print(("Error : can not load paramiko library %s" % e))
@@ -38,140 +38,158 @@ from libtisbackup.common import *
class backup_oracle(backup_generic):
"""Backup a oracle database as zipped file through ssh"""
type = 'oracle+ssh'
required_params = backup_generic.required_params + ['db_name','private_key', 'userid']
optional_params = ['username', 'remote_backup_dir', 'ignore_error_oracle_code']
db_name=''
username='oracle'
remote_backup_dir = r'/home/oracle/backup'
ignore_error_oracle_code = [ ]
def do_backup(self,stats):
type = "oracle+ssh"
required_params = backup_generic.required_params + ["db_name", "private_key", "userid"]
optional_params = ["username", "remote_backup_dir", "ignore_error_oracle_code"]
db_name = ""
username = "oracle"
remote_backup_dir = r"/home/oracle/backup"
ignore_error_oracle_code = []
self.logger.debug('[%s] Connecting to %s with user %s and key %s',self.backup_name,self.server_name,self.username,self.private_key)
def do_backup(self, stats):
self.logger.debug(
"[%s] Connecting to %s with user %s and key %s", self.backup_name, self.server_name, self.username, self.private_key
)
try:
mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
except paramiko.SSHException:
#mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
# mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server_name,username=self.username,pkey = mykey,port=self.ssh_port)
self.ssh.connect(self.server_name, username=self.username, pkey=mykey, port=self.ssh_port)
t = datetime.datetime.now()
self.backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
dumpfile= self.remote_backup_dir + '/' + self.db_name + '_' + self.backup_start_date+'.dmp'
dumplog = self.remote_backup_dir + '/' + self.db_name + '_' + self.backup_start_date+'.log'
self.backup_start_date = t.strftime("%Y%m%d-%Hh%Mm%S")
dumpfile = self.remote_backup_dir + "/" + self.db_name + "_" + self.backup_start_date + ".dmp"
dumplog = self.remote_backup_dir + "/" + self.db_name + "_" + self.backup_start_date + ".log"
self.dest_dir = os.path.join(self.backup_dir,self.backup_start_date)
self.dest_dir = os.path.join(self.backup_dir, self.backup_start_date)
if not os.path.isdir(self.dest_dir):
if not self.dry_run:
os.makedirs(self.dest_dir)
else:
print(('mkdir "%s"' % self.dest_dir))
else:
raise Exception('backup destination directory already exists : %s' % self.dest_dir)
raise Exception("backup destination directory already exists : %s" % self.dest_dir)
# dump db
stats['status']='Dumping'
cmd = "exp '%s' file='%s' grants=y log='%s'"% (self.userid,dumpfile, dumplog)
self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
stats["status"] = "Dumping"
cmd = "exp '%s' file='%s' grants=y log='%s'" % (self.userid, dumpfile, dumplog)
self.logger.debug("[%s] Dump DB : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
localpath = os.path.join(self.dest_dir , self.db_name + '.log')
self.logger.debug('[%s] Get log file with sftp on %s from %s to %s',self.backup_name,self.server_name,dumplog,localpath)
transport = self.ssh.get_transport()
localpath = os.path.join(self.dest_dir, self.db_name + ".log")
self.logger.debug("[%s] Get log file with sftp on %s from %s to %s", self.backup_name, self.server_name, dumplog, localpath)
transport = self.ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(dumplog, localpath)
sftp.close()
file = open(localpath)
for line in file:
if re.search('EXP-[0-9]+:', line) and not re.match('EXP-[0-9]+:', line).group(0).replace(':','') in self.ignore_error_oracle_code:
stats['status']='RMTemp'
self.clean_dumpfiles(dumpfile,dumplog)
raise Exception('Aborting, Not null exit code (%s) for "%s"' % (re.match('EXP-[0-9]+:', line).group(0).replace(':',''),cmd))
if (
re.search("EXP-[0-9]+:", line)
and re.match("EXP-[0-9]+:", line).group(0).replace(":", "") not in self.ignore_error_oracle_code
):
stats["status"] = "RMTemp"
self.clean_dumpfiles(dumpfile, dumplog)
raise Exception(
'Aborting, Not null exit code (%s) for "%s"' % (re.match("EXP-[0-9]+:", line).group(0).replace(":", ""), cmd)
)
file.close()
# zip the file
stats['status']='Zipping'
cmd = 'gzip %s' % dumpfile
self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd)
stats["status"] = "Zipping"
cmd = "gzip %s" % dumpfile
self.logger.debug("[%s] Compress backup : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
# get the file
stats['status']='SFTP'
filepath = dumpfile + '.gz'
localpath = os.path.join(self.dest_dir , self.db_name + '.dmp.gz')
self.logger.debug('[%s] Get gz backup with sftp on %s from %s to %s',self.backup_name,self.server_name,filepath,localpath)
stats["status"] = "SFTP"
filepath = dumpfile + ".gz"
localpath = os.path.join(self.dest_dir, self.db_name + ".dmp.gz")
self.logger.debug("[%s] Get gz backup with sftp on %s from %s to %s", self.backup_name, self.server_name, filepath, localpath)
if not self.dry_run:
transport = self.ssh.get_transport()
transport = self.ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(filepath, localpath)
sftp.close()
if not self.dry_run:
stats['total_files_count']=1
stats['written_files_count']=1
stats['total_bytes']=os.stat(localpath).st_size
stats['written_bytes']=os.stat(localpath).st_size
stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,self.db_name, stats['written_bytes'], localpath)
stats['backup_location'] = self.dest_dir
stats['status']='RMTemp'
self.clean_dumpfiles(dumpfile,dumplog)
stats['status']='OK'
stats["total_files_count"] = 1
stats["written_files_count"] = 1
stats["total_bytes"] = os.stat(localpath).st_size
stats["written_bytes"] = os.stat(localpath).st_size
stats["log"] = "gzip dump of DB %s:%s (%d bytes) to %s" % (self.server_name, self.db_name, stats["written_bytes"], localpath)
stats["backup_location"] = self.dest_dir
stats["status"] = "RMTemp"
self.clean_dumpfiles(dumpfile, dumplog)
stats["status"] = "OK"
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
def clean_dumpfiles(self, dumpfile, dumplog):
cmd = 'rm -f "%s.gz" "%s"' % (dumpfile, dumplog)
self.logger.debug("[%s] Remove temp gzip : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
cmd = "rm -f " + self.remote_backup_dir + "/" + self.db_name + "_" + self.backup_start_date + ".dmp"
self.logger.debug("[%s] Remove temp dump : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
def clean_dumpfiles(self,dumpfile,dumplog):
cmd = 'rm -f "%s.gz" "%s"' %( dumpfile , dumplog)
self.logger.debug('[%s] Remove temp gzip : %s',self.backup_name,cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
cmd = 'rm -f '+self.remote_backup_dir + '/' + self.db_name + '_' + self.backup_start_date+'.dmp'
self.logger.debug('[%s] Remove temp dump : %s',self.backup_name,cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
register_driver(backup_oracle)
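
The EXP log filtering in do_backup extracts the numeric error code before checking it against ignore_error_oracle_code; a quick standalone check of that extraction (the sample line is illustrative):

    import re

    line = "EXP-00091: Exporting questionable statistics."
    m = re.match(r"EXP-[0-9]+:", line)
    if m:
        code = m.group(0).replace(":", "")
        print(code)  # EXP-00091, compared against ignore_error_oracle_code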

View File

@@ -20,7 +20,7 @@
import sys
try:
sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
sys.stderr = open("/dev/null") # Silence silly warnings from paramiko
import paramiko
except ImportError as e:
print(("Error : can not load paramiko library %s" % e))
@@ -33,16 +33,17 @@ from .common import *
class backup_pgsql(backup_generic):
"""Backup a postgresql database as gzipped sql file through ssh"""
type = 'pgsql+ssh'
required_params = backup_generic.required_params + ['private_key']
optional_params = backup_generic.optional_params + ['db_name','tmp_dir','encoding']
db_name = ''
tmp_dir = '/tmp'
encoding = 'UTF8'
type = "pgsql+ssh"
required_params = backup_generic.required_params + ["private_key"]
optional_params = backup_generic.optional_params + ["db_name", "tmp_dir", "encoding"]
def do_backup(self,stats):
self.dest_dir = os.path.join(self.backup_dir,self.backup_start_date)
db_name = ""
tmp_dir = "/tmp"
encoding = "UTF8"
def do_backup(self, stats):
self.dest_dir = os.path.join(self.backup_dir, self.backup_start_date)
if not os.path.isdir(self.dest_dir):
if not self.dry_run:
@@ -50,117 +51,127 @@ class backup_pgsql(backup_generic):
else:
print(('mkdir "%s"' % self.dest_dir))
else:
raise Exception('backup destination directory already exists : %s' % self.dest_dir)
raise Exception("backup destination directory already exists : %s" % self.dest_dir)
try:
mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
except paramiko.SSHException:
#mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
# mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
self.logger.debug('[%s] Trying to connect to "%s" with username root and key "%s"',self.backup_name,self.server_name,self.private_key)
self.logger.debug(
'[%s] Trying to connect to "%s" with username root and key "%s"', self.backup_name, self.server_name, self.private_key
)
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server_name,username='root',pkey = mykey,port=self.ssh_port)
self.ssh.connect(self.server_name, username="root", pkey=mykey, port=self.ssh_port)
if self.db_name:
stats['log']= "Successfully backup processed to the following database :"
if self.db_name:
stats["log"] = "Successfully backup processed to the following database :"
self.do_pgsqldump(stats)
else:
stats['log']= "Successfully backuping processed to the following databases :"
stats['status']='List'
stats["log"] = "Successfully backuping processed to the following databases :"
stats["status"] = "List"
cmd = """su - postgres -c 'psql -A -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;"' 2> /dev/null"""
self.logger.debug('[%s] List databases: %s',self.backup_name,cmd)
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
self.logger.debug("[%s] List databases: %s", self.backup_name, cmd)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
databases = output.split('\n')
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
databases = output.split("\n")
for database in databases:
if database.strip() not in ("", "template0", "template1"):
self.db_name = database.strip()
self.do_pgsqldump(stats)
stats["status"] = "OK"
stats['status']='OK'
def do_pgsqldump(self,stats):
def do_pgsqldump(self, stats):
t = datetime.datetime.now()
backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
params = {
'encoding':self.encoding,
'db_name':self.db_name,
'tmp_dir':self.tmp_dir,
'dest_dir':self.dest_dir,
'backup_start_date':backup_start_date}
backup_start_date = t.strftime("%Y%m%d-%Hh%Mm%S")
params = {
"encoding": self.encoding,
"db_name": self.db_name,
"tmp_dir": self.tmp_dir,
"dest_dir": self.dest_dir,
"backup_start_date": backup_start_date,
}
# dump db
filepath = '%(tmp_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz' % params
filepath = "%(tmp_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz" % params
cmd = "su - postgres -c 'pg_dump -E %(encoding)s -Z9 %(db_name)s'" % params
cmd += ' > ' + filepath
self.logger.debug('[%s] %s ',self.backup_name,cmd)
cmd += " > " + filepath
self.logger.debug("[%s] %s ", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
# get the file
localpath = '%(dest_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz' % params
self.logger.debug('[%s] get the file using sftp from "%s" to "%s" ',self.backup_name,filepath,localpath)
localpath = "%(dest_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz" % params
self.logger.debug('[%s] get the file using sftp from "%s" to "%s" ', self.backup_name, filepath, localpath)
if not self.dry_run:
transport = self.ssh.get_transport()
transport = self.ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(filepath, localpath)
sftp.close()
if not self.dry_run:
stats['total_files_count']=1 + stats.get('total_files_count', 0)
stats['written_files_count']=1 + stats.get('written_files_count', 0)
stats['total_bytes']=os.stat(localpath).st_size + stats.get('total_bytes', 0)
stats['written_bytes']=os.stat(localpath).st_size + stats.get('written_bytes', 0)
stats['log'] = '%s "%s"' % (stats['log'] ,self.db_name)
stats['backup_location'] = self.dest_dir
stats["total_files_count"] = 1 + stats.get("total_files_count", 0)
stats["written_files_count"] = 1 + stats.get("written_files_count", 0)
stats["total_bytes"] = os.stat(localpath).st_size + stats.get("total_bytes", 0)
stats["written_bytes"] = os.stat(localpath).st_size + stats.get("written_bytes", 0)
stats["log"] = '%s "%s"' % (stats["log"], self.db_name)
stats["backup_location"] = self.dest_dir
cmd = 'rm -f %(tmp_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz' % params
self.logger.debug('[%s] %s ',self.backup_name,cmd)
cmd = "rm -f %(tmp_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz" % params
self.logger.debug("[%s] %s ", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code, cmd))
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
register_driver(backup_pgsql)
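
The dump and cleanup paths above are built with printf-style mapping keys from the params dict; rendered standalone with illustrative values:

    params = {
        "tmp_dir": "/tmp",
        "db_name": "mydb",
        "backup_start_date": "20241129-22h54m39",
    }
    print("%(tmp_dir)s/%(db_name)s-%(backup_start_date)s.sql.gz" % params)
    # -> /tmp/mydb-20241129-22h54m39.sql.gz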

View File

@@ -30,78 +30,84 @@ from libtisbackup.common import *
class backup_rsync(backup_generic):
"""Backup a directory on remote server with rsync and rsync protocol (requires running remote rsync daemon)"""
type = 'rsync'
required_params = backup_generic.required_params + ['remote_user','remote_dir','rsync_module','password_file']
optional_params = backup_generic.optional_params + ['compressionlevel','compression','bwlimit','exclude_list','protect_args','overload_args']
remote_user='root'
remote_dir=''
type = "rsync"
required_params = backup_generic.required_params + ["remote_user", "remote_dir", "rsync_module", "password_file"]
optional_params = backup_generic.optional_params + [
"compressionlevel",
"compression",
"bwlimit",
"exclude_list",
"protect_args",
"overload_args",
]
exclude_list=''
rsync_module=''
password_file = ''
compression = ''
remote_user = "root"
remote_dir = ""
exclude_list = ""
rsync_module = ""
password_file = ""
compression = ""
bwlimit = 0
protect_args = '1'
protect_args = "1"
overload_args = None
compressionlevel = 0
def read_config(self, iniconf):
assert isinstance(iniconf, ConfigParser)
backup_generic.read_config(self, iniconf)
if not self.bwlimit and iniconf.has_option("global", "bw_limit"):
self.bwlimit = iniconf.getint("global", "bw_limit")
if not self.compressionlevel and iniconf.has_option("global", "compression_level"):
self.compressionlevel = iniconf.getint("global", "compression_level")
def read_config(self,iniconf):
assert(isinstance(iniconf,ConfigParser))
backup_generic.read_config(self,iniconf)
if not self.bwlimit and iniconf.has_option('global','bw_limit'):
self.bwlimit = iniconf.getint('global','bw_limit')
if not self.compressionlevel and iniconf.has_option('global','compression_level'):
self.compressionlevel = iniconf.getint('global','compression_level')
def do_backup(self,stats):
def do_backup(self, stats):
if not self.set_lock():
self.logger.error("[%s] a lock file is set, a backup maybe already running!!",self.backup_name)
self.logger.error("[%s] a lock file is set, a backup maybe already running!!", self.backup_name)
return False
try:
try:
backup_source = 'undefined'
dest_dir = os.path.join(self.backup_dir,self.backup_start_date+'.rsync/')
backup_source = "undefined"
dest_dir = os.path.join(self.backup_dir, self.backup_start_date + ".rsync/")
if not os.path.isdir(dest_dir):
if not self.dry_run:
os.makedirs(dest_dir)
else:
print(('mkdir "%s"' % dest_dir))
else:
raise Exception('backup destination directory already exists : %s' % dest_dir)
raise Exception("backup destination directory already exists : %s" % dest_dir)
options = ['-rt','--stats','--delete-excluded','--numeric-ids','--delete-after']
options = ["-rt", "--stats", "--delete-excluded", "--numeric-ids", "--delete-after"]
if self.logger.level:
options.append('-P')
options.append("-P")
if self.dry_run:
options.append('-d')
options.append("-d")
if self.overload_args != None:
if self.overload_args is not None:
options.append(self.overload_args)
elif not "cygdrive" in self.remote_dir:
elif "cygdrive" not in self.remote_dir:
# we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful
options.append('-lpgoD')
options.append("-lpgoD")
# the protect-args option is not available in all rsync version
if not self.protect_args.lower() in ('false','no','0'):
options.append('--protect-args')
if self.protect_args.lower() not in ("false", "no", "0"):
options.append("--protect-args")
if self.compression.lower() in ('true','yes','1'):
options.append('-z')
if self.compression.lower() in ("true", "yes", "1"):
options.append("-z")
if self.compressionlevel:
options.append('--compress-level=%s' % self.compressionlevel)
options.append("--compress-level=%s" % self.compressionlevel)
if self.bwlimit:
options.append('--bwlimit %s' % self.bwlimit)
options.append("--bwlimit %s" % self.bwlimit)
latest = self.get_latest_backup(self.backup_start_date)
if latest:
options.extend(['--link-dest="%s"' % os.path.join('..',b,'') for b in latest])
options.extend(['--link-dest="%s"' % os.path.join("..", b, "") for b in latest])
def strip_quotes(s):
if s[0] == '"':
@@ -113,173 +119,193 @@ class backup_rsync(backup_generic):
# Add excludes
if "--exclude" in self.exclude_list:
# old settings with exclude_list=--exclude toto --exclude=titi
excludes = [strip_quotes(s).strip() for s in self.exclude_list.replace('--exclude=','').replace('--exclude ','').split()]
excludes = [
strip_quotes(s).strip() for s in self.exclude_list.replace("--exclude=", "").replace("--exclude ", "").split()
]
else:
try:
# newsettings with exclude_list='too','titi', parsed as a str python list content
excludes = eval('[%s]' % self.exclude_list)
excludes = eval("[%s]" % self.exclude_list)
except Exception as e:
raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e))
raise Exception(
"Error reading exclude list : value %s, eval error %s (don't forget quotes and comma...)"
% (self.exclude_list, e)
)
options.extend(['--exclude="%s"' % x for x in excludes])
if (self.rsync_module and not self.password_file):
raise Exception('You must specify a password file if you specify a rsync module')
if self.rsync_module and not self.password_file:
raise Exception("You must specify a password file if you specify a rsync module")
if (not self.rsync_module and not self.private_key):
raise Exception('If you don''t use SSH, you must specify a rsync module')
if not self.rsync_module and not self.private_key:
raise Exception("If you don" "t use SSH, you must specify a rsync module")
#rsync_re = re.compile('(?P<server>[^:]*)::(?P<export>[^/]*)/(?P<path>.*)')
#ssh_re = re.compile('((?P<user>.*)@)?(?P<server>[^:]*):(?P<path>/.*)')
# rsync_re = re.compile('(?P<server>[^:]*)::(?P<export>[^/]*)/(?P<path>.*)')
# ssh_re = re.compile('((?P<user>.*)@)?(?P<server>[^:]*):(?P<path>/.*)')
# Add ssh connection params
if self.rsync_module:
# Case of rsync exports
if self.password_file:
options.append('--password-file="%s"' % self.password_file)
backup_source = '%s@%s::%s%s' % (self.remote_user, self.server_name, self.rsync_module, self.remote_dir)
backup_source = "%s@%s::%s%s" % (self.remote_user, self.server_name, self.rsync_module, self.remote_dir)
else:
# case of rsync + ssh
ssh_params = ['-o StrictHostKeyChecking=no']
ssh_params.append('-o BatchMode=yes')
ssh_params = ["-o StrictHostKeyChecking=no"]
ssh_params.append("-o BatchMode=yes")
if self.private_key:
ssh_params.append('-i %s' % self.private_key)
ssh_params.append("-i %s" % self.private_key)
if self.cipher_spec:
ssh_params.append('-c %s' % self.cipher_spec)
ssh_params.append("-c %s" % self.cipher_spec)
if self.ssh_port != 22:
ssh_params.append('-p %i' % self.ssh_port)
ssh_params.append("-p %i" % self.ssh_port)
options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params)))
backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir)
backup_source = "%s@%s:%s" % (self.remote_user, self.server_name, self.remote_dir)
# ensure there is a slash at end
if backup_source[-1] != '/':
backup_source += '/'
if backup_source[-1] != "/":
backup_source += "/"
options_params = " ".join(options)
cmd = '/usr/bin/rsync %s %s %s 2>&1' % (options_params,backup_source,dest_dir)
self.logger.debug("[%s] rsync : %s",self.backup_name,cmd)
cmd = "/usr/bin/rsync %s %s %s 2>&1" % (options_params, backup_source, dest_dir)
self.logger.debug("[%s] rsync : %s", self.backup_name, cmd)
if not self.dry_run:
self.line = ''
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
def ondata(data,context):
self.line = ""
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
def ondata(data, context):
if context.verbose:
print(data)
context.logger.debug(data)
log = monitor_stdout(process,ondata,self)
log = monitor_stdout(process, ondata, self)
reg_total_files = re.compile('Number of files: (?P<file>\d+)')
reg_transferred_files = re.compile('Number of .*files transferred: (?P<file>\d+)')
reg_total_files = re.compile("Number of files: (?P<file>\d+)")
reg_transferred_files = re.compile("Number of .*files transferred: (?P<file>\d+)")
for l in log.splitlines():
line = l.replace(',','')
line = l.replace(",", "")
m = reg_total_files.match(line)
if m:
stats['total_files_count'] += int(m.groupdict()['file'])
stats["total_files_count"] += int(m.groupdict()["file"])
m = reg_transferred_files.match(line)
if m:
stats['written_files_count'] += int(m.groupdict()['file'])
if line.startswith('Total file size:'):
stats['total_bytes'] += int(line.split(':')[1].split()[0])
if line.startswith('Total transferred file size:'):
stats['written_bytes'] += int(line.split(':')[1].split()[0])
stats["written_files_count"] += int(m.groupdict()["file"])
if line.startswith("Total file size:"):
stats["total_bytes"] += int(line.split(":")[1].split()[0])
if line.startswith("Total transferred file size:"):
stats["written_bytes"] += int(line.split(":")[1].split()[0])
returncode = process.returncode
## deal with exit code 24 (file vanished)
if (returncode == 24):
if returncode == 24:
self.logger.warning("[" + self.backup_name + "] Note: some files vanished before transfer")
elif (returncode == 23):
elif returncode == 23:
self.logger.warning("[" + self.backup_name + "] unable so set uid on some files")
elif (returncode != 0):
elif returncode != 0:
self.logger.error("[" + self.backup_name + "] shell program exited with error code " + str(returncode))
raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:])
raise Exception(
"[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:]
)
else:
print(cmd)
#we suppress the .rsync suffix if everything went well
finaldest = os.path.join(self.backup_dir,self.backup_start_date)
self.logger.debug("[%s] renaming target directory from %s to %s" ,self.backup_name,dest_dir,finaldest)
# we suppress the .rsync suffix if everything went well
finaldest = os.path.join(self.backup_dir, self.backup_start_date)
self.logger.debug("[%s] renaming target directory from %s to %s", self.backup_name, dest_dir, finaldest)
if not self.dry_run:
os.rename(dest_dir, finaldest)
self.logger.debug("[%s] touching datetime of target directory %s" ,self.backup_name,finaldest)
self.logger.debug("[%s] touching datetime of target directory %s", self.backup_name, finaldest)
print((os.popen('touch "%s"' % finaldest).read()))
else:
print(("mv" ,dest_dir,finaldest))
stats['backup_location'] = finaldest
stats['status']='OK'
stats['log']='ssh+rsync backup from %s OK, %d bytes written for %d changed files' % (backup_source,stats['written_bytes'],stats['written_files_count'])
print(("mv", dest_dir, finaldest))
stats["backup_location"] = finaldest
stats["status"] = "OK"
stats["log"] = "ssh+rsync backup from %s OK, %d bytes written for %d changed files" % (
backup_source,
stats["written_bytes"],
stats["written_files_count"],
)
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
finally:
self.remove_lock()
def get_latest_backup(self,current):
def get_latest_backup(self, current):
result = []
filelist = os.listdir(self.backup_dir)
filelist.sort()
filelist.reverse()
full = ''
r_full = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
r_partial = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}.rsync$')
# full = ''
r_full = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
r_partial = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}.rsync$")
# we take all latest partials younger than the latest full and the latest full
for item in filelist:
if r_partial.match(item) and item<current:
if r_partial.match(item) and item < current:
result.append(item)
elif r_full.match(item) and item<current:
elif r_full.match(item) and item < current:
result.append(item)
break
return result
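For reference, the selection logic in get_latest_backup() walks the directory newest-first, collects every partial (".rsync") run older than the current one, and stops at the first full backup, which is included. A standalone sketch with hypothetical directory names:

import re

def latest_chain(entries, current):
    # newest first; partials accumulate until the first full run, which ends the scan
    r_full = re.compile(r"^\d{8}-\d{2}h\d{2}m\d{2}$")
    r_partial = re.compile(r"^\d{8}-\d{2}h\d{2}m\d{2}\.rsync$")
    result = []
    for item in sorted(entries, reverse=True):
        if r_partial.match(item) and item < current:
            result.append(item)
        elif r_full.match(item) and item < current:
            result.append(item)
            break
    return result

print(latest_chain(["20241127-01h00m00", "20241128-01h00m00.rsync"], "20241129-01h00m00"))
# -> ['20241128-01h00m00.rsync', '20241127-01h00m00']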
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
def is_pid_still_running(self,lockfile):
def is_pid_still_running(self, lockfile):
f = open(lockfile)
lines = f.readlines()
f.close()
if len(lines)==0 :
if len(lines) == 0:
self.logger.info("[" + self.backup_name + "] empty lock file, removing...")
return False
for line in lines:
if line.startswith('pid='):
pid = line.split('=')[1].strip()
if line.startswith("pid="):
pid = line.split("=")[1].strip()
if os.path.exists("/proc/" + pid):
self.logger.info("[" + self.backup_name + "] process still there")
return True
@ -290,54 +316,63 @@ class backup_rsync(backup_generic):
self.logger.info("[" + self.backup_name + "] incorrrect lock file : no pid line")
return False
def set_lock(self):
self.logger.debug("[" + self.backup_name + "] setting lock")
#TODO: improve for race condition
#TODO: also check if process is really there
if os.path.isfile(self.backup_dir + '/lock'):
self.logger.debug("[" + self.backup_name + "] File " + self.backup_dir + '/lock already exist')
if self.is_pid_still_running(self.backup_dir + '/lock')==False:
self.logger.info("[" + self.backup_name + "] removing lock file " + self.backup_dir + '/lock')
os.unlink(self.backup_dir + '/lock')
# TODO: improve for race condition
# TODO: also check if process is really there
if os.path.isfile(self.backup_dir + "/lock"):
self.logger.debug("[" + self.backup_name + "] File " + self.backup_dir + "/lock already exist")
if not self.is_pid_still_running(self.backup_dir + "/lock"):
self.logger.info("[" + self.backup_name + "] removing lock file " + self.backup_dir + "/lock")
os.unlink(self.backup_dir + "/lock")
else:
return False
lockfile = open(self.backup_dir + '/lock',"w")
lockfile = open(self.backup_dir + "/lock", "w")
# Write all the lines at once:
lockfile.write('pid='+str(os.getpid()))
lockfile.write('\nbackup_time=' + self.backup_start_date)
lockfile.write("pid=" + str(os.getpid()))
lockfile.write("\nbackup_time=" + self.backup_start_date)
lockfile.close()
return True
def remove_lock(self):
self.logger.debug("[%s] removing lock",self.backup_name )
os.unlink(self.backup_dir + '/lock')
self.logger.debug("[%s] removing lock", self.backup_name)
os.unlink(self.backup_dir + "/lock")
class backup_rsync_ssh(backup_rsync):
"""Backup a directory on remote server with rsync and ssh protocol (requires rsync software on remote host)"""
type = 'rsync+ssh'
required_params = backup_generic.required_params + ['remote_user','remote_dir','private_key']
optional_params = backup_generic.optional_params + ['compression','bwlimit','ssh_port','exclude_list','protect_args','overload_args', 'cipher_spec']
cipher_spec = ''
type = "rsync+ssh"
required_params = backup_generic.required_params + ["remote_user", "remote_dir", "private_key"]
optional_params = backup_generic.optional_params + [
"compression",
"bwlimit",
"ssh_port",
"exclude_list",
"protect_args",
"overload_args",
"cipher_spec",
]
cipher_spec = ""
register_driver(backup_rsync)
register_driver(backup_rsync_ssh)
if __name__=='__main__':
logger = logging.getLogger('tisbackup')
if __name__ == "__main__":
logger = logging.getLogger("tisbackup")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
cp = ConfigParser()
cp.read('/opt/tisbackup/configtest.ini')
dbstat = BackupStat('/backup/data/log/tisbackup.sqlite')
b = backup_rsync('htouvet','/backup/data/htouvet',dbstat)
cp.read("/opt/tisbackup/configtest.ini")
dbstat = BackupStat("/backup/data/log/tisbackup.sqlite")
b = backup_rsync("htouvet", "/backup/data/htouvet", dbstat)
b.read_config(cp)
b.process_backup()
print((b.checknagios()))

View File

@ -30,86 +30,90 @@ from .common import *
class backup_rsync_btrfs(backup_generic):
"""Backup a directory on remote server with rsync and btrfs protocol (requires running remote rsync daemon)"""
type = 'rsync+btrfs'
required_params = backup_generic.required_params + ['remote_user','remote_dir','rsync_module','password_file']
optional_params = backup_generic.optional_params + ['compressionlevel','compression','bwlimit','exclude_list','protect_args','overload_args']
remote_user='root'
remote_dir=''
type = "rsync+btrfs"
required_params = backup_generic.required_params + ["remote_user", "remote_dir", "rsync_module", "password_file"]
optional_params = backup_generic.optional_params + [
"compressionlevel",
"compression",
"bwlimit",
"exclude_list",
"protect_args",
"overload_args",
]
exclude_list=''
rsync_module=''
password_file = ''
compression = ''
remote_user = "root"
remote_dir = ""
exclude_list = ""
rsync_module = ""
password_file = ""
compression = ""
bwlimit = 0
protect_args = '1'
protect_args = "1"
overload_args = None
compressionlevel = 0
def read_config(self, iniconf):
assert isinstance(iniconf, ConfigParser)
backup_generic.read_config(self, iniconf)
if not self.bwlimit and iniconf.has_option("global", "bw_limit"):
self.bwlimit = iniconf.getint("global", "bw_limit")
if not self.compressionlevel and iniconf.has_option("global", "compression_level"):
self.compressionlevel = iniconf.getint("global", "compression_level")
def read_config(self,iniconf):
assert(isinstance(iniconf,ConfigParser))
backup_generic.read_config(self,iniconf)
if not self.bwlimit and iniconf.has_option('global','bw_limit'):
self.bwlimit = iniconf.getint('global','bw_limit')
if not self.compressionlevel and iniconf.has_option('global','compression_level'):
self.compressionlevel = iniconf.getint('global','compression_level')
def do_backup(self,stats):
def do_backup(self, stats):
if not self.set_lock():
self.logger.error("[%s] a lock file is set, a backup maybe already running!!",self.backup_name)
self.logger.error("[%s] a lock file is set, a backup maybe already running!!", self.backup_name)
return False
try:
try:
backup_source = 'undefined'
dest_dir = os.path.join(self.backup_dir,'last_backup')
backup_source = "undefined"
dest_dir = os.path.join(self.backup_dir, "last_backup")
if not os.path.isdir(dest_dir):
if not self.dry_run:
cmd = "/bin/btrfs subvolume create %s"%dest_dir
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
log = monitor_stdout(process,'',self)
cmd = "/bin/btrfs subvolume create %s" % dest_dir
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
log = monitor_stdout(process, "", self)
returncode = process.returncode
if (returncode != 0):
self.logger.error("[" + self.backup_name + "] shell program exited with error code: %s"%log)
if returncode != 0:
self.logger.error("[" + self.backup_name + "] shell program exited with error code: %s" % log)
raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd)
else:
self.logger.info("[" + self.backup_name + "] create btrs volume: %s"%dest_dir)
self.logger.info("[" + self.backup_name + "] create btrs volume: %s" % dest_dir)
else:
print(('btrfs subvolume create "%s"' %dest_dir))
print(('btrfs subvolume create "%s"' % dest_dir))
options = ['-rt','--stats','--delete-excluded','--numeric-ids','--delete-after']
options = ["-rt", "--stats", "--delete-excluded", "--numeric-ids", "--delete-after"]
if self.logger.level:
options.append('-P')
options.append("-P")
if self.dry_run:
options.append('-d')
options.append("-d")
if self.overload_args != None:
if self.overload_args is not None:
options.append(self.overload_args)
elif not "cygdrive" in self.remote_dir:
elif "cygdrive" not in self.remote_dir:
# we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful
options.append('-lpgoD')
options.append("-lpgoD")
# the protect-args option is not available in all rsync version
if not self.protect_args.lower() in ('false','no','0'):
options.append('--protect-args')
if self.protect_args.lower() not in ("false", "no", "0"):
options.append("--protect-args")
if self.compression.lower() in ('true','yes','1'):
options.append('-z')
if self.compression.lower() in ("true", "yes", "1"):
options.append("-z")
if self.compressionlevel:
options.append('--compress-level=%s' % self.compressionlevel)
options.append("--compress-level=%s" % self.compressionlevel)
if self.bwlimit:
options.append('--bwlimit %s' % self.bwlimit)
options.append("--bwlimit %s" % self.bwlimit)
latest = self.get_latest_backup(self.backup_start_date)
#remove link-dest replace by btrfs
#if latest:
# latest = self.get_latest_backup(self.backup_start_date)
# remove link-dest replace by btrfs
# if latest:
# options.extend(['--link-dest="%s"' % os.path.join('..',b,'') for b in latest])
def strip_quotes(s):
@ -122,181 +126,203 @@ class backup_rsync_btrfs(backup_generic):
# Add excludes
if "--exclude" in self.exclude_list:
# old settings with exclude_list=--exclude toto --exclude=titi
excludes = [strip_quotes(s).strip() for s in self.exclude_list.replace('--exclude=','').replace('--exclude ','').split()]
excludes = [
strip_quotes(s).strip() for s in self.exclude_list.replace("--exclude=", "").replace("--exclude ", "").split()
]
else:
try:
# new settings with exclude_list='toto','titi', parsed as a str python list content
excludes = eval('[%s]' % self.exclude_list)
excludes = eval("[%s]" % self.exclude_list)
except Exception as e:
raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e))
raise Exception(
"Error reading exclude list : value %s, eval error %s (don't forget quotes and comma...)"
% (self.exclude_list, e)
)
options.extend(['--exclude="%s"' % x for x in excludes])
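Both accepted exclude_list syntaxes normalise to the same Python list before being turned into --exclude options. A quick illustration (values hypothetical):

# old syntax: raw rsync flags in the ini value
old_style = "--exclude=toto --exclude titi"
excludes = [s.strip("'\"") for s in old_style.replace("--exclude=", "").replace("--exclude ", "").split()]
print(excludes)  # ['toto', 'titi']

# new syntax: a quoted, comma-separated list, parsed with eval('[%s]' % value)
new_style = "'toto','titi'"
print(eval("[%s]" % new_style))  # ['toto', 'titi']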
if (self.rsync_module and not self.password_file):
raise Exception('You must specify a password file if you specify a rsync module')
if self.rsync_module and not self.password_file:
raise Exception("You must specify a password file if you specify a rsync module")
if (not self.rsync_module and not self.private_key):
raise Exception('If you don''t use SSH, you must specify a rsync module')
if not self.rsync_module and not self.private_key:
raise Exception("If you don" "t use SSH, you must specify a rsync module")
#rsync_re = re.compile('(?P<server>[^:]*)::(?P<export>[^/]*)/(?P<path>.*)')
#ssh_re = re.compile('((?P<user>.*)@)?(?P<server>[^:]*):(?P<path>/.*)')
# rsync_re = re.compile('(?P<server>[^:]*)::(?P<export>[^/]*)/(?P<path>.*)')
# ssh_re = re.compile('((?P<user>.*)@)?(?P<server>[^:]*):(?P<path>/.*)')
# Add ssh connection params
if self.rsync_module:
# Case of rsync exports
if self.password_file:
options.append('--password-file="%s"' % self.password_file)
backup_source = '%s@%s::%s%s' % (self.remote_user, self.server_name, self.rsync_module, self.remote_dir)
backup_source = "%s@%s::%s%s" % (self.remote_user, self.server_name, self.rsync_module, self.remote_dir)
else:
# case of rsync + ssh
ssh_params = ['-o StrictHostKeyChecking=no']
ssh_params = ["-o StrictHostKeyChecking=no"]
if self.private_key:
ssh_params.append('-i %s' % self.private_key)
ssh_params.append("-i %s" % self.private_key)
if self.cipher_spec:
ssh_params.append('-c %s' % self.cipher_spec)
ssh_params.append("-c %s" % self.cipher_spec)
if self.ssh_port != 22:
ssh_params.append('-p %i' % self.ssh_port)
ssh_params.append("-p %i" % self.ssh_port)
options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params)))
backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir)
backup_source = "%s@%s:%s" % (self.remote_user, self.server_name, self.remote_dir)
# ensure there is a slash at end
if backup_source[-1] != '/':
backup_source += '/'
if backup_source[-1] != "/":
backup_source += "/"
options_params = " ".join(options)
cmd = '/usr/bin/rsync %s %s %s 2>&1' % (options_params,backup_source,dest_dir)
self.logger.debug("[%s] rsync : %s",self.backup_name,cmd)
cmd = "/usr/bin/rsync %s %s %s 2>&1" % (options_params, backup_source, dest_dir)
self.logger.debug("[%s] rsync : %s", self.backup_name, cmd)
if not self.dry_run:
self.line = ''
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
def ondata(data,context):
self.line = ""
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
def ondata(data, context):
if context.verbose:
print(data)
context.logger.debug(data)
log = monitor_stdout(process,ondata,self)
log = monitor_stdout(process, ondata, self)
reg_total_files = re.compile('Number of files: (?P<file>\d+)')
reg_transferred_files = re.compile('Number of .*files transferred: (?P<file>\d+)')
reg_total_files = re.compile("Number of files: (?P<file>\d+)")
reg_transferred_files = re.compile("Number of .*files transferred: (?P<file>\d+)")
for l in log.splitlines():
line = l.replace(',','')
line = l.replace(",", "")
m = reg_total_files.match(line)
if m:
stats['total_files_count'] += int(m.groupdict()['file'])
stats["total_files_count"] += int(m.groupdict()["file"])
m = reg_transferred_files.match(line)
if m:
stats['written_files_count'] += int(m.groupdict()['file'])
if line.startswith('Total file size:'):
stats['total_bytes'] += int(line.split(':')[1].split()[0])
if line.startswith('Total transferred file size:'):
stats['written_bytes'] += int(line.split(':')[1].split()[0])
stats["written_files_count"] += int(m.groupdict()["file"])
if line.startswith("Total file size:"):
stats["total_bytes"] += int(line.split(":")[1].split()[0])
if line.startswith("Total transferred file size:"):
stats["written_bytes"] += int(line.split(":")[1].split()[0])
returncode = process.returncode
## deal with exit code 24 (file vanished)
if (returncode == 24):
if returncode == 24:
self.logger.warning("[" + self.backup_name + "] Note: some files vanished before transfer")
elif (returncode == 23):
elif returncode == 23:
self.logger.warning("[" + self.backup_name + "] unable so set uid on some files")
elif (returncode != 0):
elif returncode != 0:
self.logger.error("[" + self.backup_name + "] shell program exited with error code ", str(returncode))
raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:])
raise Exception(
"[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:]
)
else:
print(cmd)
#we take a snapshot of last_backup if everything went well
finaldest = os.path.join(self.backup_dir,self.backup_start_date)
self.logger.debug("[%s] snapshoting last_backup directory from %s to %s" ,self.backup_name,dest_dir,finaldest)
# we take a snapshot of last_backup if everything went well
finaldest = os.path.join(self.backup_dir, self.backup_start_date)
self.logger.debug("[%s] snapshoting last_backup directory from %s to %s", self.backup_name, dest_dir, finaldest)
if not os.path.isdir(finaldest):
if not self.dry_run:
cmd = "/bin/btrfs subvolume snapshot %s %s"%(dest_dir,finaldest)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
log = monitor_stdout(process,'',self)
cmd = "/bin/btrfs subvolume snapshot %s %s" % (dest_dir, finaldest)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
log = monitor_stdout(process, "", self)
returncode = process.returncode
if (returncode != 0):
if returncode != 0:
self.logger.error("[" + self.backup_name + "] shell program exited with error code " + str(returncode))
raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:])
raise Exception(
"[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:]
)
else:
self.logger.info("[" + self.backup_name + "] snapshot directory created %s"%finaldest)
self.logger.info("[" + self.backup_name + "] snapshot directory created %s" % finaldest)
else:
print(("btrfs snapshot of %s to %s"%(dest_dir,finaldest)))
print(("btrfs snapshot of %s to %s" % (dest_dir, finaldest)))
else:
raise Exception('snapshot directory already exists : %s' %finaldest)
self.logger.debug("[%s] touching datetime of target directory %s" ,self.backup_name,finaldest)
raise Exception("snapshot directory already exists : %s" % finaldest)
self.logger.debug("[%s] touching datetime of target directory %s", self.backup_name, finaldest)
print((os.popen('touch "%s"' % finaldest).read()))
stats['backup_location'] = finaldest
stats['status']='OK'
stats['log']='ssh+rsync+btrfs backup from %s OK, %d bytes written for %d changed files' % (backup_source,stats['written_bytes'],stats['written_files_count'])
stats["backup_location"] = finaldest
stats["status"] = "OK"
stats["log"] = "ssh+rsync+btrfs backup from %s OK, %d bytes written for %d changed files" % (
backup_source,
stats["written_bytes"],
stats["written_files_count"],
)
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
finally:
self.remove_lock()
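Compared with the plain rsync driver, this variant trades --link-dest hardlinking for btrfs copy-on-write: rsync always refreshes a single last_backup subvolume, and each run is then frozen as a read-only point-in-time snapshot. The cycle, condensed (paths hypothetical; assumes a btrfs filesystem and root privileges):

import subprocess

dest_dir = "/backup/data/htouvet/last_backup"         # hypothetical
finaldest = "/backup/data/htouvet/20241129-22h54m39"  # hypothetical
subprocess.check_call(["/bin/btrfs", "subvolume", "create", dest_dir])  # first run only
# ... rsync into dest_dir ...
subprocess.check_call(["/bin/btrfs", "subvolume", "snapshot", dest_dir, finaldest])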
def get_latest_backup(self,current):
def get_latest_backup(self, current):
result = []
filelist = os.listdir(self.backup_dir)
filelist.sort()
filelist.reverse()
full = ''
r_full = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
r_partial = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}.rsync$')
# full = ''
r_full = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
r_partial = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}.rsync$")
# we take all latest partials younger than the latest full and the latest full
for item in filelist:
if r_partial.match(item) and item<current:
if r_partial.match(item) and item < current:
result.append(item)
elif r_full.match(item) and item<current:
elif r_full.match(item) and item < current:
result.append(item)
break
return result
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
def is_pid_still_running(self,lockfile):
def is_pid_still_running(self, lockfile):
f = open(lockfile)
lines = f.readlines()
f.close()
if len(lines)==0 :
if len(lines) == 0:
self.logger.info("[" + self.backup_name + "] empty lock file, removing...")
return False
for line in lines:
if line.startswith('pid='):
pid = line.split('=')[1].strip()
if line.startswith("pid="):
pid = line.split("=")[1].strip()
if os.path.exists("/proc/" + pid):
self.logger.info("[" + self.backup_name + "] process still there")
return True
@ -307,54 +333,63 @@ class backup_rsync_btrfs(backup_generic):
self.logger.info("[" + self.backup_name + "] incorrrect lock file : no pid line")
return False
def set_lock(self):
self.logger.debug("[" + self.backup_name + "] setting lock")
#TODO: improve for race condition
#TODO: also check if process is really there
if os.path.isfile(self.backup_dir + '/lock'):
self.logger.debug("[" + self.backup_name + "] File " + self.backup_dir + '/lock already exist')
if self.is_pid_still_running(self.backup_dir + '/lock')==False:
self.logger.info("[" + self.backup_name + "] removing lock file " + self.backup_dir + '/lock')
os.unlink(self.backup_dir + '/lock')
# TODO: improve for race condition
# TODO: also check if process is really there
if os.path.isfile(self.backup_dir + "/lock"):
self.logger.debug("[" + self.backup_name + "] File " + self.backup_dir + "/lock already exist")
if not self.is_pid_still_running(self.backup_dir + "/lock"):
self.logger.info("[" + self.backup_name + "] removing lock file " + self.backup_dir + "/lock")
os.unlink(self.backup_dir + "/lock")
else:
return False
lockfile = open(self.backup_dir + '/lock',"w")
lockfile = open(self.backup_dir + "/lock", "w")
# Write all the lines at once:
lockfile.write('pid='+str(os.getpid()))
lockfile.write('\nbackup_time=' + self.backup_start_date)
lockfile.write("pid=" + str(os.getpid()))
lockfile.write("\nbackup_time=" + self.backup_start_date)
lockfile.close()
return True
def remove_lock(self):
self.logger.debug("[%s] removing lock",self.backup_name )
os.unlink(self.backup_dir + '/lock')
self.logger.debug("[%s] removing lock", self.backup_name)
os.unlink(self.backup_dir + "/lock")
class backup_rsync__btrfs_ssh(backup_rsync_btrfs):
"""Backup a directory on remote server with rsync,ssh and btrfs protocol (requires rsync software on remote host)"""
type = 'rsync+btrfs+ssh'
required_params = backup_generic.required_params + ['remote_user','remote_dir','private_key']
optional_params = backup_generic.optional_params + ['compression','bwlimit','ssh_port','exclude_list','protect_args','overload_args','cipher_spec']
cipher_spec = ''
type = "rsync+btrfs+ssh"
required_params = backup_generic.required_params + ["remote_user", "remote_dir", "private_key"]
optional_params = backup_generic.optional_params + [
"compression",
"bwlimit",
"ssh_port",
"exclude_list",
"protect_args",
"overload_args",
"cipher_spec",
]
cipher_spec = ""
register_driver(backup_rsync_btrfs)
register_driver(backup_rsync__btrfs_ssh)
if __name__=='__main__':
logger = logging.getLogger('tisbackup')
if __name__ == "__main__":
logger = logging.getLogger("tisbackup")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
cp = ConfigParser()
cp.read('/opt/tisbackup/configtest.ini')
dbstat = BackupStat('/backup/data/log/tisbackup.sqlite')
b = backup_rsync('htouvet','/backup/data/htouvet',dbstat)
cp.read("/opt/tisbackup/configtest.ini")
dbstat = BackupStat("/backup/data/log/tisbackup.sqlite")
b = backup_rsync("htouvet", "/backup/data/htouvet", dbstat)
b.read_config(cp)
b.process_backup()
print((b.checknagios()))

View File

@ -19,11 +19,10 @@
# -----------------------------------------------------------------------
import sys
try:
sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
sys.stderr = open("/dev/null") # Silence silly warnings from paramiko
import paramiko
except ImportError as e:
print("Error : can not load paramiko library %s" % e)
@ -36,14 +35,15 @@ from .common import *
class backup_samba4(backup_generic):
"""Backup a samba4 databases as gzipped tdbs file through ssh"""
type = 'samba4'
required_params = backup_generic.required_params + ['private_key']
optional_params = backup_generic.optional_params + ['root_dir_samba']
type = "samba4"
required_params = backup_generic.required_params + ["private_key"]
optional_params = backup_generic.optional_params + ["root_dir_samba"]
root_dir_samba = "/var/lib/samba/"
def do_backup(self,stats):
self.dest_dir = os.path.join(self.backup_dir,self.backup_start_date)
def do_backup(self, stats):
self.dest_dir = os.path.join(self.backup_dir, self.backup_start_date)
if not os.path.isdir(self.dest_dir):
if not self.dry_run:
@ -51,118 +51,128 @@ class backup_samba4(backup_generic):
else:
print('mkdir "%s"' % self.dest_dir)
else:
raise Exception('backup destination directory already exists : %s' % self.dest_dir)
raise Exception("backup destination directory already exists : %s" % self.dest_dir)
self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key)
self.logger.debug("[%s] Connecting to %s with user root and key %s", self.backup_name, self.server_name, self.private_key)
try:
mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
except paramiko.SSHException:
#mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
# mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server_name,username='root',pkey = mykey, port=self.ssh_port)
self.ssh.connect(self.server_name, username="root", pkey=mykey, port=self.ssh_port)
stats['log']= "Successfully backuping processed to the following databases :"
stats['status']='List'
dir_ldbs = os.path.join(self.root_dir_samba+'/private/sam.ldb.d/')
cmd = 'ls %s/*.ldb 2> /dev/null' % dir_ldbs
self.logger.debug('[%s] List databases: %s',self.backup_name,cmd)
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
stats["log"] = "Successfully backuping processed to the following databases :"
stats["status"] = "List"
dir_ldbs = os.path.join(self.root_dir_samba + "/private/sam.ldb.d/")
cmd = "ls %s/*.ldb 2> /dev/null" % dir_ldbs
self.logger.debug("[%s] List databases: %s", self.backup_name, cmd)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
databases = output.split('\n')
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
databases = output.split("\n")
for database in databases:
if database != "":
self.db_name = database.rstrip()
self.do_mysqldump(stats)
def do_mysqldump(self,stats):
t = datetime.datetime.now()
backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
def do_mysqldump(self, stats):
# t = datetime.datetime.now()
# backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
# dump db
stats['status']='Dumping'
cmd = 'tdbbackup -s .tisbackup ' + self.db_name
self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
stats["status"] = "Dumping"
cmd = "tdbbackup -s .tisbackup " + self.db_name
self.logger.debug("[%s] Dump DB : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
print(output)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
# zip the file
stats['status']='Zipping'
stats["status"] = "Zipping"
cmd = 'gzip -f "%s.tisbackup"' % self.db_name
self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd)
self.logger.debug("[%s] Compress backup : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
# get the file
stats['status']='SFTP'
filepath = self.db_name + '.tisbackup.gz'
localpath = os.path.join(self.dest_dir , os.path.basename(self.db_name) + '.tisbackup.gz')
self.logger.debug('[%s] Get gz backup with sftp on %s from %s to %s',self.backup_name,self.server_name,filepath,localpath)
stats["status"] = "SFTP"
filepath = self.db_name + ".tisbackup.gz"
localpath = os.path.join(self.dest_dir, os.path.basename(self.db_name) + ".tisbackup.gz")
self.logger.debug("[%s] Get gz backup with sftp on %s from %s to %s", self.backup_name, self.server_name, filepath, localpath)
if not self.dry_run:
transport = self.ssh.get_transport()
transport = self.ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(filepath, localpath)
sftp.close()
if not self.dry_run:
stats['total_files_count']=1 + stats.get('total_files_count', 0)
stats['written_files_count']=1 + stats.get('written_files_count', 0)
stats['total_bytes']=os.stat(localpath).st_size + stats.get('total_bytes', 0)
stats['written_bytes']=os.stat(localpath).st_size + stats.get('written_bytes', 0)
stats['log'] = '%s "%s"' % (stats['log'] ,self.db_name)
stats['backup_location'] = self.dest_dir
stats["total_files_count"] = 1 + stats.get("total_files_count", 0)
stats["written_files_count"] = 1 + stats.get("written_files_count", 0)
stats["total_bytes"] = os.stat(localpath).st_size + stats.get("total_bytes", 0)
stats["written_bytes"] = os.stat(localpath).st_size + stats.get("written_bytes", 0)
stats["log"] = '%s "%s"' % (stats["log"], self.db_name)
stats["backup_location"] = self.dest_dir
stats['status']='RMTemp'
stats["status"] = "RMTemp"
cmd = 'rm -f "%s"' % filepath
self.logger.debug('[%s] Remove temp gzip : %s',self.backup_name,cmd)
self.logger.debug("[%s] Remove temp gzip : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=self.ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
stats['status']='OK'
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
stats["status"] = "OK"
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
p = re.compile("^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$")
for item in filelist:
if p.match(item):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
if fileisodate(dir_name)>start:
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = datetime.datetime.strptime(item, "%Y%m%d-%Hh%Mm%S").isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
register_driver(backup_samba4)

View File

@ -19,11 +19,10 @@
# -----------------------------------------------------------------------
import sys
try:
sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
sys.stderr = open("/dev/null") # Silence silly warnings from paramiko
import paramiko
except ImportError as e:
print("Error : can not load paramiko library %s" % e)
@ -40,122 +39,137 @@ from .common import *
class backup_sqlserver(backup_generic):
"""Backup a SQLSERVER database as gzipped sql file through ssh"""
type = 'sqlserver+ssh'
required_params = backup_generic.required_params + ['db_name','private_key']
optional_params = ['username', 'remote_backup_dir', 'sqlserver_before_2005', 'db_server_name', 'db_user', 'db_password']
db_name=''
db_user=''
db_password=''
type = "sqlserver+ssh"
required_params = backup_generic.required_params + ["db_name", "private_key"]
optional_params = ["username", "remote_backup_dir", "sqlserver_before_2005", "db_server_name", "db_user", "db_password"]
db_name = ""
db_user = ""
db_password = ""
userdb = "-E"
username='Administrateur'
remote_backup_dir = r'c:/WINDOWS/Temp/'
username = "Administrateur"
remote_backup_dir = r"c:/WINDOWS/Temp/"
sqlserver_before_2005 = False
db_server_name = "localhost"
def do_backup(self,stats):
def do_backup(self, stats):
try:
mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
except paramiko.SSHException:
#mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
# mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)
self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key)
self.logger.debug("[%s] Connecting to %s with user root and key %s", self.backup_name, self.server_name, self.private_key)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(self.server_name,username=self.username,pkey=mykey, port=self.ssh_port)
ssh.connect(self.server_name, username=self.username, pkey=mykey, port=self.ssh_port)
t = datetime.datetime.now()
backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
backup_start_date = t.strftime("%Y%m%d-%Hh%Mm%S")
backup_file = self.remote_backup_dir + '/' + self.db_name + '-' + backup_start_date + '.bak'
if not self.db_user == '':
self.userdb = '-U %s -P %s' % ( self.db_user, self.db_password )
backup_file = self.remote_backup_dir + "/" + self.db_name + "-" + backup_start_date + ".bak"
if not self.db_user == "":
self.userdb = "-U %s -P %s" % (self.db_user, self.db_password)
# dump db
stats['status']='Dumping'
stats["status"] = "Dumping"
if self.sqlserver_before_2005:
cmd = """osql -E -Q "BACKUP DATABASE [%s]
cmd = """osql -E -Q "BACKUP DATABASE [%s]
TO DISK='%s'
WITH FORMAT" """ % ( self.db_name, backup_file )
WITH FORMAT" """ % (self.db_name, backup_file)
else:
cmd = """sqlcmd %s -S "%s" -d master -Q "BACKUP DATABASE [%s]
cmd = """sqlcmd %s -S "%s" -d master -Q "BACKUP DATABASE [%s]
TO DISK = N'%s'
WITH INIT, NOUNLOAD ,
NAME = N'Backup %s', NOSKIP ,STATS = 10, NOFORMAT" """ % (self.userdb, self.db_server_name, self.db_name, backup_file ,self.db_name )
self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
NAME = N'Backup %s', NOSKIP ,STATS = 10, NOFORMAT" """ % (
self.userdb,
self.db_server_name,
self.db_name,
backup_file,
self.db_name,
)
self.logger.debug("[%s] Dump DB : %s", self.backup_name, cmd)
try:
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
# zip the file
stats['status']='Zipping'
stats["status"] = "Zipping"
cmd = 'gzip "%s"' % backup_file
self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd)
self.logger.debug("[%s] Compress backup : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
# get the file
stats['status']='SFTP'
filepath = backup_file + '.gz'
localpath = os.path.join(self.backup_dir , self.db_name + '-' + backup_start_date + '.bak.gz')
self.logger.debug('[%s] Get gz backup with sftp on %s from %s to %s',self.backup_name,self.server_name,filepath,localpath)
stats["status"] = "SFTP"
filepath = backup_file + ".gz"
localpath = os.path.join(self.backup_dir, self.db_name + "-" + backup_start_date + ".bak.gz")
self.logger.debug("[%s] Get gz backup with sftp on %s from %s to %s", self.backup_name, self.server_name, filepath, localpath)
if not self.dry_run:
transport = ssh.get_transport()
transport = ssh.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(filepath, localpath)
sftp.close()
if not self.dry_run:
stats['total_files_count']=1
stats['written_files_count']=1
stats['total_bytes']=os.stat(localpath).st_size
stats['written_bytes']=os.stat(localpath).st_size
stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,self.db_name, stats['written_bytes'], localpath)
stats['backup_location'] = localpath
stats["total_files_count"] = 1
stats["written_files_count"] = 1
stats["total_bytes"] = os.stat(localpath).st_size
stats["written_bytes"] = os.stat(localpath).st_size
stats["log"] = "gzip dump of DB %s:%s (%d bytes) to %s" % (self.server_name, self.db_name, stats["written_bytes"], localpath)
stats["backup_location"] = localpath
finally:
stats['status']='RMTemp'
cmd = 'rm -f "%s" "%s"' % ( backup_file + '.gz', backup_file )
self.logger.debug('[%s] Remove temp gzip : %s',self.backup_name,cmd)
stats["status"] = "RMTemp"
cmd = 'rm -f "%s" "%s"' % (backup_file + ".gz", backup_file)
self.logger.debug("[%s] Remove temp gzip : %s", self.backup_name, cmd)
if not self.dry_run:
(error_code,output) = ssh_exec(cmd,ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
(error_code, output) = ssh_exec(cmd, ssh=ssh)
self.logger.debug("[%s] Output of %s :\n%s", self.backup_name, cmd, output)
if error_code:
raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
raise Exception('Aborting, non-zero exit code (%i) for "%s"' % (error_code, cmd))
stats['status']='OK'
stats["status"] = "OK"
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).bak.gz$' % self.db_name)
p = re.compile("^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).bak.gz$" % self.db_name)
for item in filelist:
sr = p.match(item)
if sr:
file_name = os.path.join(self.backup_dir,item)
start = datetime.datetime.strptime(sr.groups()[0],'%Y%m%d-%Hh%Mm%S').isoformat()
if not file_name in registered:
self.logger.info('Registering %s from %s',file_name,fileisodate(file_name))
size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split('\t')[0])
self.logger.debug(' Size in bytes : %i',size_bytes)
file_name = os.path.join(self.backup_dir, item)
start = datetime.datetime.strptime(sr.groups()[0], "%Y%m%d-%Hh%Mm%S").isoformat()
if file_name not in registered:
self.logger.info("Registering %s from %s", file_name, fileisodate(file_name))
size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split("\t")[0])
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end=fileisodate(file_name),status='OK',total_bytes=size_bytes,backup_location=file_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=fileisodate(file_name),
status="OK",
total_bytes=size_bytes,
backup_location=file_name,
)
else:
self.logger.info('Skipping %s from %s, already registered',file_name,fileisodate(file_name))
self.logger.info("Skipping %s from %s, already registered", file_name, fileisodate(file_name))
register_driver(backup_sqlserver)

View File

@ -41,16 +41,16 @@ from .common import *
class backup_switch(backup_generic):
"""Backup a startup-config on a switch"""
type = 'switch'
required_params = backup_generic.required_params + ['switch_ip','switch_type']
optional_params = backup_generic.optional_params + [ 'switch_user', 'switch_password']
type = "switch"
switch_user = ''
switch_password = ''
required_params = backup_generic.required_params + ["switch_ip", "switch_type"]
optional_params = backup_generic.optional_params + ["switch_user", "switch_password"]
switch_user = ""
switch_password = ""
def switch_hp(self, filename):
s = socket.socket()
try:
s.connect((self.switch_ip, 23))
@ -58,31 +58,31 @@ class backup_switch(backup_generic):
except:
raise
child=pexpect.spawn('telnet '+self.switch_ip)
child = pexpect.spawn("telnet " + self.switch_ip)
time.sleep(1)
if self.switch_user != "":
child.sendline(self.switch_user)
child.sendline(self.switch_password+'\r')
child.sendline(self.switch_password + "\r")
else:
child.sendline(self.switch_password+'\r')
child.sendline(self.switch_password + "\r")
try:
child.expect("#")
except:
raise Exception("Bad Credentials")
child.sendline( "terminal length 1000\r")
child.sendline("terminal length 1000\r")
child.expect("#")
child.sendline( "show config\r")
child.sendline("show config\r")
child.maxread = 100000000
child.expect("Startup.+$")
lines = child.after
if "-- MORE --" in lines:
if "-- MORE --" in lines:
raise Exception("Terminal lenght is not sufficient")
child.expect("#")
lines += child.before
child.sendline("logout\r")
child.send('y\r')
child.send("y\r")
for line in lines.split("\n")[1:-1]:
open(filename,"a").write(line.strip()+"\n")
open(filename, "a").write(line.strip() + "\n")
def switch_cisco(self, filename):
s = socket.socket()
@ -92,38 +92,37 @@ class backup_switch(backup_generic):
except:
raise
child=pexpect.spawn('telnet '+self.switch_ip)
child = pexpect.spawn("telnet " + self.switch_ip)
time.sleep(1)
if self.switch_user:
child.sendline(self.switch_user)
child.expect('Password: ')
child.sendline(self.switch_password+'\r')
child.expect("Password: ")
child.sendline(self.switch_password + "\r")
try:
child.expect(">")
except:
raise Exception("Bad Credentials")
child.sendline('enable\r')
child.expect('Password: ')
child.sendline(self.switch_password+'\r')
child.sendline("enable\r")
child.expect("Password: ")
child.sendline(self.switch_password + "\r")
try:
child.expect("#")
except:
raise Exception("Bad Credentials")
child.sendline( "terminal length 0\r")
child.sendline("terminal length 0\r")
child.expect("#")
child.sendline("show run\r")
child.expect('Building configuration...')
child.expect("Building configuration...")
child.expect("#")
running_config = child.before
child.sendline("show vlan\r")
child.expect('VLAN')
child.expect("VLAN")
child.expect("#")
vlan = 'VLAN'+child.before
open(filename,"a").write(running_config+'\n'+vlan)
child.send('exit\r')
vlan = "VLAN" + child.before
open(filename, "a").write(running_config + "\n" + vlan)
child.send("exit\r")
child.close()
def switch_linksys_SRW2024(self, filename):
s = socket.socket()
try:
@ -132,48 +131,53 @@ class backup_switch(backup_generic):
except:
raise
child=pexpect.spawn('telnet '+self.switch_ip)
child = pexpect.spawn("telnet " + self.switch_ip)
time.sleep(1)
if hasattr(self,'switch_password'):
child.sendline(self.switch_user+'\t')
child.sendline(self.switch_password+'\r')
if hasattr(self, "switch_password"):
child.sendline(self.switch_user + "\t")
child.sendline(self.switch_password + "\r")
else:
child.sendline(self.switch_user+'\r')
child.sendline(self.switch_user + "\r")
try:
child.expect('Menu')
child.expect("Menu")
except:
raise Exception("Bad Credentials")
child.sendline('\032')
child.expect('>')
child.sendline('lcli')
child.sendline("\032")
child.expect(">")
child.sendline("lcli")
child.expect("Name:")
if hasattr(self,'switch_password'):
child.send(self.switch_user+'\r'+self.switch_password+'\r')
if hasattr(self, "switch_password"):
child.send(self.switch_user + "\r" + self.switch_password + "\r")
else:
child.sendline(self.switch_user)
child.expect(".*#")
child.sendline( "terminal datadump")
child.sendline("terminal datadump")
child.expect("#")
child.sendline( "show startup-config")
child.sendline("show startup-config")
child.expect("#")
lines = child.before
if "Unrecognized command" in lines:
raise Exception("Bad Credentials")
child.sendline("exit")
#child.expect( ">")
#child.sendline("logout")
# child.expect( ">")
# child.sendline("logout")
for line in lines.split("\n")[1:-1]:
open(filename,"a").write(line.strip()+"\n")
open(filename, "a").write(line.strip() + "\n")
def switch_dlink_DGS1210(self, filename):
login_data = {'Login' : self.switch_user, 'Password' : self.switch_password, 'sellanId' : 0, 'sellan' : 0, 'lang_seqid' : 1}
resp = requests.post('http://%s/form/formLoginApply' % self.switch_ip, data=login_data, headers={"Referer":'http://%s/www/login.html' % self.switch_ip})
login_data = {"Login": self.switch_user, "Password": self.switch_password, "sellanId": 0, "sellan": 0, "lang_seqid": 1}
resp = requests.post(
"http://%s/form/formLoginApply" % self.switch_ip,
data=login_data,
headers={"Referer": "http://%s/www/login.html" % self.switch_ip},
)
if "Wrong password" in resp.text:
raise Exception("Wrong password")
resp = requests.post("http://%s/BinFile/config.bin" % self.switch_ip, headers={"Referer":'http://%s/www/iss/013_download_cfg.html' % self.switch_ip})
with open(filename, 'w') as f:
resp = requests.post(
"http://%s/BinFile/config.bin" % self.switch_ip, headers={"Referer": "http://%s/www/iss/013_download_cfg.html" % self.switch_ip}
)
with open(filename, "w") as f:
f.write(resp.content)
def switch_dlink_DGS1510(self, filename):
@ -184,12 +188,12 @@ class backup_switch(backup_generic):
except:
raise
child = pexpect.spawn('telnet ' + self.switch_ip)
child = pexpect.spawn("telnet " + self.switch_ip)
time.sleep(1)
if self.switch_user:
child.sendline(self.switch_user)
child.expect('Password:')
child.sendline(self.switch_password + '\r')
child.expect("Password:")
child.sendline(self.switch_password + "\r")
try:
child.expect("#")
except:
@ -198,67 +202,66 @@ class backup_switch(backup_generic):
child.expect("#")
child.sendline("show run\r")
child.logfile_read = open(filename, "a")
child.expect('End of configuration file')
child.expect('#--')
child.expect("End of configuration file")
child.expect("#--")
child.expect("#")
child.close()
myre = re.compile("#--+")
config = myre.split(open(filename).read())[2]
with open(filename,'w') as f:
with open(filename, "w") as f:
f.write(config)
def do_backup(self,stats):
def do_backup(self, stats):
try:
dest_filename = os.path.join(self.backup_dir,"%s-%s" % (self.backup_name,self.backup_start_date))
dest_filename = os.path.join(self.backup_dir, "%s-%s" % (self.backup_name, self.backup_start_date))
options = []
options_params = " ".join(options)
# options = []
# options_params = " ".join(options)
if "LINKSYS-SRW2024" == self.switch_type:
dest_filename += '.txt'
dest_filename += ".txt"
self.switch_linksys_SRW2024(dest_filename)
elif self.switch_type in [ "CISCO", ]:
dest_filename += '.txt'
elif self.switch_type in [
"CISCO",
]:
dest_filename += ".txt"
self.switch_cisco(dest_filename)
elif self.switch_type in [ "HP-PROCURVE-4104GL", "HP-PROCURVE-2524" ]:
dest_filename += '.txt'
elif self.switch_type in ["HP-PROCURVE-4104GL", "HP-PROCURVE-2524"]:
dest_filename += ".txt"
self.switch_hp(dest_filename)
elif "DLINK-DGS1210" == self.switch_type:
dest_filename += '.bin'
dest_filename += ".bin"
self.switch_dlink_DGS1210(dest_filename)
elif "DLINK-DGS1510" == self.switch_type:
dest_filename += '.cfg'
dest_filename += ".cfg"
self.switch_dlink_DGS1510(dest_filename)
else:
raise Exception("Unknown Switch type")
stats['total_files_count']=1
stats['written_files_count']=1
stats['total_bytes']= os.stat(dest_filename).st_size
stats['written_bytes'] = stats['total_bytes']
stats['backup_location'] = dest_filename
stats['status']='OK'
stats['log']='Switch backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes'])
stats["total_files_count"] = 1
stats["written_files_count"] = 1
stats["total_bytes"] = os.stat(dest_filename).st_size
stats["written_bytes"] = stats["total_bytes"]
stats["backup_location"] = dest_filename
stats["status"] = "OK"
stats["log"] = "Switch backup from %s OK, %d bytes written" % (self.server_name, stats["written_bytes"])
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
register_driver(backup_switch)
if __name__=='__main__':
logger = logging.getLogger('tisbackup')
if __name__ == "__main__":
logger = logging.getLogger("tisbackup")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
cp = ConfigParser()
cp.read('/opt/tisbackup/configtest.ini')
cp.read("/opt/tisbackup/configtest.ini")
b = backup_switch()
b.read_config(cp)

View File

@ -26,6 +26,7 @@ import pyVmomi
import requests
from pyVim.connect import Disconnect, SmartConnect
from pyVmomi import vim, vmodl
# Disable HTTPS verification warnings.
from requests.packages import urllib3
@ -41,39 +42,36 @@ from stat import *
class backup_vmdk(backup_generic):
type = 'esx-vmdk'
type = "esx-vmdk"
required_params = backup_generic.required_params + ['esxhost','password_file','server_name']
optional_params = backup_generic.optional_params + ['esx_port', 'prefix_clone', 'create_ovafile', 'halt_vm']
required_params = backup_generic.required_params + ["esxhost", "password_file", "server_name"]
optional_params = backup_generic.optional_params + ["esx_port", "prefix_clone", "create_ovafile", "halt_vm"]
esx_port = 443
prefix_clone = "clone-"
create_ovafile = "no"
halt_vm = "no"
def make_compatible_cookie(self,client_cookie):
def make_compatible_cookie(self, client_cookie):
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(
";", 1)[0].lstrip()
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
# Make a cookie
cookie = dict()
cookie[cookie_name] = cookie_text
return cookie
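Worked example of the rewrite above (session token illustrative): given

'vmware_soap_session="52ab..."; Path=/; HttpOnly; Secure;'

the returned dict is

{'vmware_soap_session': ' "52ab..."; $Path=/'}

i.e. the SOAP session cookie is re-expressed in the older '$Path' form that the ESX disk-export URLs accept.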
def download_file(self,url, local_filename, cookie, headers):
r = requests.get(url, stream=True, headers=headers,cookies=cookie,verify=False)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024*1024*64):
def download_file(self, url, local_filename, cookie, headers):
r = requests.get(url, stream=True, headers=headers, cookies=cookie, verify=False)
with open(local_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=1024 * 1024 * 64):
if chunk:
f.write(chunk)
f.flush()
return local_filename
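The download is deliberately streamed: stream=True with 64 MiB chunks keeps memory flat while exported disks can run to many gigabytes, and verify=False matches the urllib3 warning suppression above. Hypothetical usage:

# url, cookie and headers as produced by export_vmdks()/make_compatible_cookie()
# self.download_file("https://esx.example.org/nfc/...disk-0.vmdk", "vm-disk-0.vmdk",
#                    cookie, {"Content-Type": "application/octet-stream"})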
def export_vmdks(self,vm):
def export_vmdks(self, vm):
HttpNfcLease = vm.ExportVm()
try:
infos = HttpNfcLease.info
@ -82,153 +80,147 @@ class backup_vmdk(backup_generic):
for device_url in device_urls:
deviceId = device_url.key
deviceUrlStr = device_url.url
diskFileName = vm.name.replace(self.prefix_clone,'') + "-" + device_url.targetId
diskFileName = vm.name.replace(self.prefix_clone, "") + "-" + device_url.targetId
diskUrlStr = deviceUrlStr.replace("*", self.esxhost)
diskLocalPath = './' + diskFileName
# diskLocalPath = './' + diskFileName
cookie = self.make_compatible_cookie(si._stub.cookie)
headers = {'Content-Type': 'application/octet-stream'}
self.logger.debug("[%s] exporting disk: %s" %(self.server_name,diskFileName))
headers = {"Content-Type": "application/octet-stream"}
self.logger.debug("[%s] exporting disk: %s" % (self.server_name, diskFileName))
self.download_file(diskUrlStr, diskFileName, cookie, headers)
vmdks.append({"filename":diskFileName,"id":deviceId})
vmdks.append({"filename": diskFileName, "id": deviceId})
finally:
HttpNfcLease.Complete()
return vmdks
def create_ovf(self,vm,vmdks):
def create_ovf(self, vm, vmdks):
ovfDescParams = vim.OvfManager.CreateDescriptorParams()
ovf = si.content.ovfManager.CreateDescriptor(vm, ovfDescParams)
root = ET.fromstring(ovf.ovfDescriptor)
new_id = list(root[0][1].attrib.values())[0][1:3]
ovfFiles = []
for vmdk in vmdks:
old_id = vmdk['id'][1:3]
id = vmdk['id'].replace(old_id,new_id)
ovfFiles.append(vim.OvfManager.OvfFile(size=os.path.getsize(vmdk['filename']), path=vmdk['filename'], deviceId=id))
old_id = vmdk["id"][1:3]
id = vmdk["id"].replace(old_id, new_id)
ovfFiles.append(vim.OvfManager.OvfFile(size=os.path.getsize(vmdk["filename"]), path=vmdk["filename"], deviceId=id))
ovfDescParams = vim.OvfManager.CreateDescriptorParams()
ovfDescParams.ovfFiles = ovfFiles;
ovfDescParams.ovfFiles = ovfFiles
ovf = si.content.ovfManager.CreateDescriptor(vm, ovfDescParams)
ovf_filename = vm.name+".ovf"
self.logger.debug("[%s] creating ovf file: %s" %(self.server_name,ovf_filename))
ovf_filename = vm.name + ".ovf"
self.logger.debug("[%s] creating ovf file: %s" % (self.server_name, ovf_filename))
with open(ovf_filename, "w") as f:
f.write(ovf.ovfDescriptor)
return ovf_filename
def create_ova(self,vm, vmdks, ovf_filename):
ova_filename = vm.name+".ova"
vmdks.insert(0,{"filename":ovf_filename,"id":"false"})
self.logger.debug("[%s] creating ova file: %s" %(self.server_name,ova_filename))
with tarfile.open(ova_filename, "w") as tar:
def create_ova(self, vm, vmdks, ovf_filename):
ova_filename = vm.name + ".ova"
vmdks.insert(0, {"filename": ovf_filename, "id": "false"})
self.logger.debug("[%s] creating ova file: %s" % (self.server_name, ova_filename))
with tarfile.open(ova_filename, "w") as tar:
for vmdk in vmdks:
tar.add(vmdk['filename'])
os.unlink(vmdk['filename'])
tar.add(vmdk["filename"])
os.unlink(vmdk["filename"])
return ova_filename
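For reference, an OVA is just a tar archive whose first member must be the OVF descriptor, which is why the descriptor is inserted at index 0 before the disks are added (and each member is unlinked once archived). A quick sanity check on the result (hypothetical filename):
import tarfile

# Hypothetical file, for illustration only.
with tarfile.open("myvm.ova") as tar:
    print(tar.getnames())   # ['myvm.ovf', 'myvm-disk-0.vmdk', ...]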
def clone_vm(self,vm):
task = self.wait_task(vm.CreateSnapshot_Task(name="backup",description="Automatic backup "+datetime.now().strftime("%Y-%m-%d %H:%M:%S"),memory=False,quiesce=True))
snapshot=task.info.result
def clone_vm(self, vm):
task = self.wait_task(
vm.CreateSnapshot_Task(
name="backup", description="Automatic backup " + datetime.now().strftime("%Y-%m-%d %H:%M:%s"), memory=False, quiesce=True
)
)
snapshot = task.info.result
prefix_vmclone = self.prefix_clone
clone_name = prefix_vmclone + vm.name
datastore = '[%s]' % vm.datastore[0].name
datastore = "[%s]" % vm.datastore[0].name
vmx_file = vim.vm.FileInfo(logDirectory=None, snapshotDirectory=None, suspendDirectory=None, vmPathName=datastore)
vmx_file = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName=datastore)
config = vim.vm.ConfigSpec(name=clone_name, memoryMB=vm.summary.config.memorySizeMB, numCPUs=vm.summary.config.numCpu, files=vmx_file)
config = vim.vm.ConfigSpec(
name=clone_name, memoryMB=vm.summary.config.memorySizeMB, numCPUs=vm.summary.config.numCpu, files=vmx_file
)
hosts = datacenter.hostFolder.childEntity
resource_pool = hosts[0].resourcePool
self.wait_task(vmFolder.CreateVM_Task(config=config,pool=resource_pool))
self.wait_task(vmFolder.CreateVM_Task(config=config, pool=resource_pool))
new_vm = [x for x in vmFolder.childEntity if x.name == clone_name][0]
controller = vim.vm.device.VirtualDeviceSpec()
controller = vim.vm.device.VirtualDeviceSpec()
controller.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
controller.device = vim.vm.device.VirtualLsiLogicController(busNumber=0,sharedBus='noSharing')
controller.device = vim.vm.device.VirtualLsiLogicController(busNumber=0, sharedBus="noSharing")
controller.device.key = 0
i=0
i = 0
vm_devices = []
clone_folder = "%s/" % "/".join(new_vm.summary.config.vmPathName.split('/')[:-1])
clone_folder = "%s/" % "/".join(new_vm.summary.config.vmPathName.split("/")[:-1])
for device in vm.config.hardware.device:
if device.__class__.__name__ == 'vim.vm.device.VirtualDisk':
cur_vers = int(re.findall(r'\d{3,6}', device.backing.fileName)[0])
if device.__class__.__name__ == "vim.vm.device.VirtualDisk":
cur_vers = int(re.findall(r"\d{3,6}", device.backing.fileName)[0])
if cur_vers == 1:
source = device.backing.fileName.replace('-000001','')
source = device.backing.fileName.replace("-000001", "")
else:
source = device.backing.fileName.replace('%d.' % cur_vers,'%d.' % ( cur_vers - 1 ))
source = device.backing.fileName.replace("%d." % cur_vers, "%d." % (cur_vers - 1))
dest = clone_folder+source.split('/')[-1]
disk_spec = vim.VirtualDiskManager.VirtualDiskSpec(diskType="sparseMonolithic",adapterType="ide")
self.wait_task(si.content.virtualDiskManager.CopyVirtualDisk_Task(sourceName=source,destName=dest,destSpec=disk_spec))
# self.wait_task(si.content.virtualDiskManager.ShrinkVirtualDisk_Task(dest))
dest = clone_folder + source.split("/")[-1]
disk_spec = vim.VirtualDiskManager.VirtualDiskSpec(diskType="sparseMonolithic", adapterType="ide")
self.wait_task(si.content.virtualDiskManager.CopyVirtualDisk_Task(sourceName=source, destName=dest, destSpec=disk_spec))
# self.wait_task(si.content.virtualDiskManager.ShrinkVirtualDisk_Task(dest))
diskfileBacking = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskfileBacking.fileName = dest
diskfileBacking.diskMode = "persistent"
diskfileBacking.diskMode = "persistent"
vdisk_spec = vim.vm.device.VirtualDeviceSpec()
vdisk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
vdisk_spec.device = vim.vm.device.VirtualDisk(capacityInKB=10000 ,controllerKey=controller.device.key)
vdisk_spec.device = vim.vm.device.VirtualDisk(capacityInKB=10000, controllerKey=controller.device.key)
vdisk_spec.device.key = 0
vdisk_spec.device.backing = diskfileBacking
vdisk_spec.device.unitNumber = i
vm_devices.append(vdisk_spec)
i+=1
i += 1
vm_devices.append(controller)
config.deviceChange=vm_devices
config.deviceChange = vm_devices
self.wait_task(new_vm.ReconfigVM_Task(config))
self.wait_task(snapshot.RemoveSnapshot_Task(removeChildren=True))
return new_vm
def wait_task(self,task):
def wait_task(self, task):
while task.info.state in ["queued", "running"]:
time.sleep(2)
self.logger.debug("[%s] %s",self.server_name,task.info.descriptionId)
self.logger.debug("[%s] %s", self.server_name, task.info.descriptionId)
return task
def do_backup(self,stats):
def do_backup(self, stats):
try:
dest_dir = os.path.join(self.backup_dir,"%s" % self.backup_start_date)
dest_dir = os.path.join(self.backup_dir, "%s" % self.backup_start_date)
if not os.path.isdir(dest_dir):
if not self.dry_run:
os.makedirs(dest_dir)
else:
print('mkdir "%s"' % dest_dir)
else:
raise Exception('backup destination directory already exists : %s' % dest_dir)
raise Exception("backup destination directory already exists : %s" % dest_dir)
os.chdir(dest_dir)
user_esx, password_esx, null = open(self.password_file).read().split('\n')
user_esx, password_esx, null = open(self.password_file).read().split("\n")
global si
si = SmartConnect(host=self.esxhost,user=user_esx,pwd=password_esx,port=self.esx_port)
si = SmartConnect(host=self.esxhost, user=user_esx, pwd=password_esx, port=self.esx_port)
if not si:
raise Exception("Could not connect to the specified host using specified "
"username and password")
raise Exception("Could not connect to the specified host using specified " "username and password")
atexit.register(Disconnect, si)
content = si.RetrieveContent()
for child in content.rootFolder.childEntity:
if hasattr(child, 'vmFolder'):
if hasattr(child, "vmFolder"):
global vmFolder, datacenter
datacenter = child
vmFolder = datacenter.vmFolder
@ -240,7 +232,7 @@ class backup_vmdk(backup_generic):
vm.ShutdownGuest()
vm_is_off = True
if vm_is_off:
if vm_is_off:
vmdks = self.export_vmdks(vm)
ovf_filename = self.create_ovf(vm, vmdks)
else:
@ -250,32 +242,29 @@ class backup_vmdk(backup_generic):
self.wait_task(new_vm.Destroy_Task())
if str2bool(self.create_ovafile):
ova_filename = self.create_ova(vm, vmdks, ovf_filename)
ova_filename = self.create_ova(vm, vmdks, ovf_filename)  # noqa: F841
if str2bool(self.halt_vm):
vm.PowerOnVM()
if os.path.exists(dest_dir):
for file in os.listdir(dest_dir):
stats['written_bytes'] += os.stat(file)[ST_SIZE]
stats['total_files_count'] += 1
stats['written_files_count'] += 1
stats['total_bytes'] = stats['written_bytes']
stats["written_bytes"] += os.stat(file)[ST_SIZE]
stats["total_files_count"] += 1
stats["written_files_count"] += 1
stats["total_bytes"] = stats["written_bytes"]
else:
stats['written_bytes'] = 0
stats["written_bytes"] = 0
stats['backup_location'] = dest_dir
stats['log']='VMDK backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes'])
stats['status']='OK'
stats["backup_location"] = dest_dir
stats["log"] = "XVA backup from %s OK, %d bytes written" % (self.server_name, stats["written_bytes"])
stats["status"] = "OK"
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
register_driver(backup_vmdk)

View File

@ -19,7 +19,6 @@
# -----------------------------------------------------------------------
import paramiko
from .common import *
@ -27,67 +26,76 @@ from .common import *
class backup_xcp_metadata(backup_generic):
"""Backup metatdata of a xcp pool using xe pool-dump-database"""
type = 'xcp-dump-metadata'
required_params = ['type','server_name','private_key','backup_name']
def do_backup(self,stats):
self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key)
type = "xcp-dump-metadata"
required_params = ["type", "server_name", "private_key", "backup_name"]
def do_backup(self, stats):
self.logger.debug("[%s] Connecting to %s with user root and key %s", self.backup_name, self.server_name, self.private_key)
t = datetime.datetime.now()
backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')
backup_start_date = t.strftime("%Y%m%d-%Hh%Mm%S")
# dump pool metadata
localpath = os.path.join(self.backup_dir , 'xcp_metadata-' + backup_start_date + '.dump')
stats['status']='Dumping'
localpath = os.path.join(self.backup_dir, "xcp_metadata-" + backup_start_date + ".dump")
stats["status"] = "Dumping"
if not self.dry_run:
cmd = "/opt/xensource/bin/xe pool-dump-database file-name="
self.logger.debug('[%s] Dump XCP Metadata : %s', self.backup_name, cmd)
(error_code, output) = ssh_exec(cmd, server_name=self.server_name,private_key=self.private_key, remote_user='root')
self.logger.debug("[%s] Dump XCP Metadata : %s", self.backup_name, cmd)
(error_code, output) = ssh_exec(cmd, server_name=self.server_name, private_key=self.private_key, remote_user="root")
with open(localpath,"w") as f:
with open(localpath, "w") as f:
f.write(output)
# gzip the file
stats['status']='Zipping'
cmd = 'gzip %s ' % localpath
self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd)
stats["status"] = "Zipping"
cmd = "gzip %s " % localpath
self.logger.debug("[%s] Compress backup : %s", self.backup_name, cmd)
if not self.dry_run:
call_external_process(cmd)
localpath += ".gz"
if not self.dry_run:
stats['total_files_count']=1
stats['written_files_count']=1
stats['total_bytes']=os.stat(localpath).st_size
stats['written_bytes']=os.stat(localpath).st_size
stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,'xcp metadata dump', stats['written_bytes'], localpath)
stats['backup_location'] = localpath
stats['status']='OK'
stats["total_files_count"] = 1
stats["written_files_count"] = 1
stats["total_bytes"] = os.stat(localpath).st_size
stats["written_bytes"] = os.stat(localpath).st_size
stats["log"] = "gzip dump of DB %s:%s (%d bytes) to %s" % (self.server_name, "xcp metadata dump", stats["written_bytes"], localpath)
stats["backup_location"] = localpath
stats["status"] = "OK"
def register_existingbackups(self):
"""scan metatdata backup files and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
p = re.compile('^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).dump.gz$' % self.server_name)
p = re.compile("^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).dump.gz$" % self.server_name)
for item in filelist:
sr = p.match(item)
if sr:
file_name = os.path.join(self.backup_dir,item)
start = datetime.datetime.strptime(sr.groups()[0],'%Y%m%d-%Hh%Mm%S').isoformat()
if not file_name in registered:
self.logger.info('Registering %s from %s',file_name,fileisodate(file_name))
size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split('\t')[0])
self.logger.debug(' Size in bytes : %i',size_bytes)
file_name = os.path.join(self.backup_dir, item)
start = datetime.datetime.strptime(sr.groups()[0], "%Y%m%d-%Hh%Mm%S").isoformat()
if file_name not in registered:
self.logger.info("Registering %s from %s", file_name, fileisodate(file_name))
size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split("\t")[0])
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end=fileisodate(file_name),status='OK',total_bytes=size_bytes,backup_location=file_name)
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=fileisodate(file_name),
status="OK",
total_bytes=size_bytes,
backup_location=file_name,
)
else:
self.logger.info('Skipping %s from %s, already registered',file_name,fileisodate(file_name))
self.logger.info("Skipping %s from %s, already registered", file_name, fileisodate(file_name))
register_driver(backup_xcp_metadata)
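For reference, the pattern compiled in register_existingbackups() applied to a hypothetical host and dump file (not part of the commit). Note the prefix is the server name, while do_backup() above writes files prefixed xcp_metadata-, so pre-existing dumps are only picked up if they follow this naming:
import datetime
import re

# Hypothetical server name and file name, for illustration only.
p = re.compile(r"^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).dump.gz$" % "xen01")
m = p.match("xen01-20241129-22h54m39.dump.gz")
print(datetime.datetime.strptime(m.group("date"), "%Y%m%d-%Hh%Mm%S").isoformat())  # 2024-11-29T22:54:39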

View File

@ -36,16 +36,24 @@ import requests
from . import XenAPI
from .common import *
if hasattr(ssl, '_create_unverified_context'):
if hasattr(ssl, "_create_unverified_context"):
ssl._create_default_https_context = ssl._create_unverified_context
class backup_xva(backup_generic):
"""Backup a VM running on a XCP server as a XVA file (requires xe tools and XenAPI)"""
type = 'xen-xva'
required_params = backup_generic.required_params + ['xcphost','password_file','server_name']
optional_params = backup_generic.optional_params + ['enable_https', 'halt_vm', 'verify_export', 'reuse_snapshot', 'ignore_proxies', 'use_compression' ]
type = "xen-xva"
required_params = backup_generic.required_params + ["xcphost", "password_file", "server_name"]
optional_params = backup_generic.optional_params + [
"enable_https",
"halt_vm",
"verify_export",
"reuse_snapshot",
"ignore_proxies",
"use_compression",
]
enable_https = "no"
halt_vm = "no"
@ -55,34 +63,33 @@ class backup_xva(backup_generic):
use_compression = "true"
if str2bool(ignore_proxies):
os.environ['http_proxy']=""
os.environ['https_proxy']=""
os.environ["http_proxy"] = ""
os.environ["https_proxy"] = ""
def verify_export_xva(self,filename):
self.logger.debug("[%s] Verify xva export integrity",self.server_name)
def verify_export_xva(self, filename):
self.logger.debug("[%s] Verify xva export integrity", self.server_name)
tar = tarfile.open(filename)
members = tar.getmembers()
for tarinfo in members:
if re.search('^[0-9]*$',os.path.basename(tarinfo.name)):
if re.search("^[0-9]*$", os.path.basename(tarinfo.name)):
sha1sum = hashlib.sha1(tar.extractfile(tarinfo).read()).hexdigest()
sha1sum2 = tar.extractfile(tarinfo.name+'.checksum').read()
sha1sum2 = tar.extractfile(tarinfo.name + ".checksum").read()
if not sha1sum == sha1sum2:
raise Exception("File corrupt")
tar.close()
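One latent detail worth flagging in verify_export_xva(): tar.extractfile(...).read() returns bytes on Python 3 while hexdigest() returns str, and str never compares equal to bytes, so the equality check needs a decode (or the .checksum content must be compared as bytes) to ever succeed. A minimal illustration:
import hashlib

digest = hashlib.sha1(b"data").hexdigest()   # str
stored = digest.encode()                     # bytes, as a tar member's read() returns
print(digest == stored)                      # False on Python 3: str vs bytes
print(digest == stored.decode())             # True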
def export_xva(self, vdi_name, filename, halt_vm,dry_run,enable_https=True, reuse_snapshot="no"):
user_xen, password_xen, null = open(self.password_file).read().split('\n')
session = XenAPI.Session('https://'+self.xcphost)
def export_xva(self, vdi_name, filename, halt_vm, dry_run, enable_https=True, reuse_snapshot="no"):
user_xen, password_xen, null = open(self.password_file).read().split("\n")
session = XenAPI.Session("https://" + self.xcphost)
try:
session.login_with_password(user_xen,password_xen)
session.login_with_password(user_xen, password_xen)
except XenAPI.Failure as error:
msg,ip = error.details
msg, ip = error.details
if msg == 'HOST_IS_SLAVE':
if msg == "HOST_IS_SLAVE":
xcphost = ip
session = XenAPI.Session('https://'+xcphost)
session.login_with_password(user_xen,password_xen)
session = XenAPI.Session("https://" + xcphost)
session.login_with_password(user_xen, password_xen)
if not session.xenapi.VM.get_by_name_label(vdi_name):
return "bad VM name: %s" % vdi_name
@ -90,105 +97,101 @@ class backup_xva(backup_generic):
vm = session.xenapi.VM.get_by_name_label(vdi_name)[0]
status_vm = session.xenapi.VM.get_power_state(vm)
self.logger.debug("[%s] Check if previous fail backups exist",vdi_name)
backups_fail = files = [f for f in os.listdir(self.backup_dir) if f.startswith(vdi_name) and f.endswith(".tmp")]
self.logger.debug("[%s] Check if previous fail backups exist", vdi_name)
backups_fail = [f for f in os.listdir(self.backup_dir) if f.startswith(vdi_name) and f.endswith(".tmp")]
for backup_fail in backups_fail:
self.logger.debug('[%s] Delete backup "%s"', vdi_name, backup_fail)
os.unlink(os.path.join(self.backup_dir, backup_fail))
#add snapshot option
# add snapshot option
if not str2bool(halt_vm):
self.logger.debug("[%s] Check if previous tisbackups snapshots exist",vdi_name)
old_snapshots = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vdi_name))
self.logger.debug("[%s] Check if previous tisbackups snapshots exist", vdi_name)
old_snapshots = session.xenapi.VM.get_by_name_label("tisbackup-%s" % (vdi_name))
self.logger.debug("[%s] Old snaps count %s", vdi_name, len(old_snapshots))
if len(old_snapshots) == 1 and str2bool(reuse_snapshot) == True:
if len(old_snapshots) == 1 and str2bool(reuse_snapshot):
snapshot = old_snapshots[0]
self.logger.debug("[%s] Reusing snap \"%s\"", vdi_name, session.xenapi.VM.get_name_description(snapshot))
vm = snapshot # vm = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vdi_name))[0]
self.logger.debug('[%s] Reusing snap "%s"', vdi_name, session.xenapi.VM.get_name_description(snapshot))
vm = snapshot # vm = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vdi_name))[0]
else:
self.logger.debug("[%s] Deleting %s old snaps", vdi_name, len(old_snapshots))
for old_snapshot in old_snapshots:
self.logger.debug("[%s] Destroy snapshot %s",vdi_name,session.xenapi.VM.get_name_description(old_snapshot))
self.logger.debug("[%s] Destroy snapshot %s", vdi_name, session.xenapi.VM.get_name_description(old_snapshot))
try:
for vbd in session.xenapi.VM.get_VBDs(old_snapshot):
if session.xenapi.VBD.get_type(vbd) == 'CD' and session.xenapi.VBD.get_record(vbd)['empty'] == False:
if session.xenapi.VBD.get_type(vbd) == "CD" and not session.xenapi.VBD.get_record(vbd)["empty"]:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if not 'NULL' in vdi:
if "NULL" not in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(old_snapshot)
except XenAPI.Failure as error:
return("error when destroy snapshot %s"%(error))
return "error when destroy snapshot %s" % (error)
now = datetime.datetime.now()
self.logger.debug("[%s] Snapshot in progress",vdi_name)
self.logger.debug("[%s] Snapshot in progress", vdi_name)
try:
snapshot = session.xenapi.VM.snapshot(vm,"tisbackup-%s"%(vdi_name))
snapshot = session.xenapi.VM.snapshot(vm, "tisbackup-%s" % (vdi_name))
self.logger.debug("[%s] got snapshot %s", vdi_name, snapshot)
except XenAPI.Failure as error:
return("error when snapshot %s"%(error))
#get snapshot opaqueRef
vm = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vdi_name))[0]
session.xenapi.VM.set_name_description(snapshot,"snapshot created by tisbackup on: %s"%(now.strftime("%Y-%m-%d %H:%M")))
return "error when snapshot %s" % (error)
# get snapshot opaqueRef
vm = session.xenapi.VM.get_by_name_label("tisbackup-%s" % (vdi_name))[0]
session.xenapi.VM.set_name_description(snapshot, "snapshot created by tisbackup on: %s" % (now.strftime("%Y-%m-%d %H:%M")))
else:
self.logger.debug("[%s] Status of VM: %s",self.backup_name,status_vm)
self.logger.debug("[%s] Status of VM: %s", self.backup_name, status_vm)
if status_vm == "Running":
self.logger.debug("[%s] Shudown in progress",self.backup_name)
self.logger.debug("[%s] Shudown in progress", self.backup_name)
if dry_run:
print("session.xenapi.VM.clean_shutdown(vm)")
else:
session.xenapi.VM.clean_shutdown(vm)
try:
try:
filename_temp = filename+".tmp"
self.logger.debug("[%s] Copy in progress",self.backup_name)
filename_temp = filename + ".tmp"
self.logger.debug("[%s] Copy in progress", self.backup_name)
if not str2bool(self.use_compression):
socket.setdefaulttimeout(120)
scheme = "http://"
if str2bool(enable_https):
scheme = "https://"
url = scheme+user_xen+":"+password_xen+"@"+self.xcphost+"/export?use_compression="+self.use_compression+"&uuid="+session.xenapi.VM.get_uuid(vm)
top_level_url = scheme+self.xcphost+"/export?use_compression="+self.use_compression+"&uuid="+session.xenapi.VM.get_uuid(vm)
# url = scheme+user_xen+":"+password_xen+"@"+self.xcphost+"/export?use_compression="+self.use_compression+"&uuid="+session.xenapi.VM.get_uuid(vm)
top_level_url = (
scheme + self.xcphost + "/export?use_compression=" + self.use_compression + "&uuid=" + session.xenapi.VM.get_uuid(vm)
)
r = requests.get(top_level_url, auth=(user_xen, password_xen))
open(filename_temp, 'wb').write(r.content)
open(filename_temp, "wb").write(r.content)
except Exception as e:
self.logger.error("[%s] error when fetching snap: %s", "tisbackup-%s"%(vdi_name), e)
self.logger.error("[%s] error when fetching snap: %s", "tisbackup-%s" % (vdi_name), e)
if os.path.exists(filename_temp):
os.unlink(filename_temp)
raise
finally:
if not str2bool(halt_vm):
self.logger.debug("[%s] Destroy snapshot",'tisbackup-%s'%(vdi_name))
self.logger.debug("[%s] Destroy snapshot", "tisbackup-%s" % (vdi_name))
try:
for vbd in session.xenapi.VM.get_VBDs(snapshot):
if session.xenapi.VBD.get_type(vbd) == 'CD' and session.xenapi.VBD.get_record(vbd)['empty'] == False:
if session.xenapi.VBD.get_type(vbd) == "CD" and not session.xenapi.VBD.get_record(vbd)["empty"]:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if not 'NULL' in vdi:
if "NULL" not in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(snapshot)
except XenAPI.Failure as error:
return("error when destroy snapshot %s"%(error))
return "error when destroy snapshot %s" % (error)
elif status_vm == "Running":
self.logger.debug("[%s] Starting in progress",self.backup_name)
self.logger.debug("[%s] Starting in progress", self.backup_name)
if dry_run:
print("session.xenapi.Async.VM.start(vm,False,True)")
else:
session.xenapi.Async.VM.start(vm,False,True)
session.xenapi.Async.VM.start(vm, False, True)
session.logout()
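For context, the export URL assembled above, with hypothetical values; credentials travel in the Authorization header via requests' auth= instead of being embedded in the URL as the commented-out line used to do. Note that r.content buffers the whole XVA in memory; a streamed download (as in the VMDK driver's download_file) would avoid that:
# Hypothetical values, for illustration only.
scheme, xcphost, use_compression = "https://", "xcp01.example.org", "true"
uuid = "0a1b2c3d-4e5f-6789-abcd-ef0123456789"
print(scheme + xcphost + "/export?use_compression=" + use_compression + "&uuid=" + uuid)
# https://xcp01.example.org/export?use_compression=true&uuid=0a1b2c3d-4e5f-6789-abcd-ef0123456789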
@ -196,85 +199,102 @@ class backup_xva(backup_generic):
tar = os.system('tar tf "%s" > /dev/null' % filename_temp)
if not tar == 0:
os.unlink(filename_temp)
return("Tar error")
return "Tar error"
if str2bool(self.verify_export):
self.verify_export_xva(filename_temp)
os.rename(filename_temp,filename)
os.rename(filename_temp, filename)
return(0)
return 0
def do_backup(self,stats):
def do_backup(self, stats):
try:
dest_filename = os.path.join(self.backup_dir,"%s-%s.%s" % (self.backup_name,self.backup_start_date,'xva'))
dest_filename = os.path.join(self.backup_dir, "%s-%s.%s" % (self.backup_name, self.backup_start_date, "xva"))
options = []
options_params = " ".join(options)
cmd = self.export_xva( vdi_name= self.server_name,filename= dest_filename, halt_vm= self.halt_vm, enable_https=self.enable_https, dry_run= self.dry_run, reuse_snapshot=self.reuse_snapshot)
# options = []
# options_params = " ".join(options)
cmd = self.export_xva(
vdi_name=self.server_name,
filename=dest_filename,
halt_vm=self.halt_vm,
enable_https=self.enable_https,
dry_run=self.dry_run,
reuse_snapshot=self.reuse_snapshot,
)
if os.path.exists(dest_filename):
stats['written_bytes'] = os.stat(dest_filename)[ST_SIZE]
stats['total_files_count'] = 1
stats['written_files_count'] = 1
stats['total_bytes'] = stats['written_bytes']
stats["written_bytes"] = os.stat(dest_filename)[ST_SIZE]
stats["total_files_count"] = 1
stats["written_files_count"] = 1
stats["total_bytes"] = stats["written_bytes"]
else:
stats['written_bytes'] = 0
stats["written_bytes"] = 0
stats['backup_location'] = dest_filename
stats["backup_location"] = dest_filename
if cmd == 0:
stats['log']='XVA backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes'])
stats['status']='OK'
stats["log"] = "XVA backup from %s OK, %d bytes written" % (self.server_name, stats["written_bytes"])
stats["status"] = "OK"
else:
raise Exception(cmd)
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
registered = [
b["backup_location"]
for b in self.dbstat.query("select distinct backup_location from stats where backup_name=?", (self.backup_name,))
]
filelist = os.listdir(self.backup_dir)
filelist.sort()
for item in filelist:
if item.endswith('.xva'):
dir_name = os.path.join(self.backup_dir,item)
if not dir_name in registered:
start = (datetime.datetime.strptime(item,self.backup_name+'-%Y%m%d-%Hh%Mm%S.xva') + datetime.timedelta(0,30*60)).isoformat()
if fileisodate(dir_name)>start:
if item.endswith(".xva"):
dir_name = os.path.join(self.backup_dir, item)
if dir_name not in registered:
start = (
datetime.datetime.strptime(item, self.backup_name + "-%Y%m%d-%Hh%Mm%S.xva") + datetime.timedelta(0, 30 * 60)
).isoformat()
if fileisodate(dir_name) > start:
stop = fileisodate(dir_name)
else:
stop = start
self.logger.info('Registering %s started on %s',dir_name,start)
self.logger.debug(' Disk usage %s','du -sb "%s"' % dir_name)
self.logger.info("Registering %s started on %s", dir_name, start)
self.logger.debug(" Disk usage %s", 'du -sb "%s"' % dir_name)
if not self.dry_run:
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split("\t")[0])
else:
size_bytes = 0
self.logger.debug(' Size in bytes : %i',size_bytes)
self.logger.debug(" Size in bytes : %i", size_bytes)
if not self.dry_run:
self.dbstat.add(self.backup_name,self.server_name,'',\
backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name,TYPE='BACKUP')
self.dbstat.add(
self.backup_name,
self.server_name,
"",
backup_start=start,
backup_end=stop,
status="OK",
total_bytes=size_bytes,
backup_location=dir_name,
TYPE="BACKUP",
)
else:
self.logger.info('Skipping %s, already registered',dir_name)
self.logger.info("Skipping %s, already registered", dir_name)
register_driver(backup_xva)
if __name__=='__main__':
logger = logging.getLogger('tisbackup')
if __name__ == "__main__":
logger = logging.getLogger("tisbackup")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
cp = ConfigParser()
cp.read('/opt/tisbackup/configtest.ini')
cp.read("/opt/tisbackup/configtest.ini")
b = backup_xva()
b.read_config(cp)

File diff suppressed because it is too large

View File

@ -36,262 +36,252 @@ from stat import *
from . import XenAPI
from .common import *
if hasattr(ssl, '_create_unverified_context'):
if hasattr(ssl, "_create_unverified_context"):
ssl._create_default_https_context = ssl._create_unverified_context
class copy_vm_xcp(backup_generic):
"""Backup a VM running on a XCP server on a second SR (requires xe tools and XenAPI)"""
type = 'copy-vm-xcp'
required_params = backup_generic.required_params + ['server_name','storage_name','password_file','vm_name','network_name']
optional_params = backup_generic.optional_params + ['start_vm','max_copies', 'delete_snapshot', 'halt_vm']
type = "copy-vm-xcp"
required_params = backup_generic.required_params + ["server_name", "storage_name", "password_file", "vm_name", "network_name"]
optional_params = backup_generic.optional_params + ["start_vm", "max_copies", "delete_snapshot", "halt_vm"]
start_vm = "no"
max_copies = 1
halt_vm = "no"
delete_snapshot = "yes"
def read_config(self,iniconf):
assert(isinstance(iniconf,ConfigParser))
backup_generic.read_config(self,iniconf)
if self.start_vm == 'no' and iniconf.has_option('global','start_vm'):
self.start_vm = iniconf.get('global','start_vm')
if self.max_copies == 1 and iniconf.has_option('global','max_copies'):
self.max_copies = iniconf.getint('global','max_copies')
if self.delete_snapshot == "yes" and iniconf.has_option('global','delete_snapshot'):
self.delete_snapshot = iniconf.get('global','delete_snapshot')
def read_config(self, iniconf):
assert isinstance(iniconf, ConfigParser)
backup_generic.read_config(self, iniconf)
if self.start_vm in "no" and iniconf.has_option("global", "start_vm"):
self.start_vm = iniconf.get("global", "start_vm")
if self.max_copies == 1 and iniconf.has_option("global", "max_copies"):
self.max_copies = iniconf.getint("global", "max_copies")
if self.delete_snapshot == "yes" and iniconf.has_option("global", "delete_snapshot"):
self.delete_snapshot = iniconf.get("global", "delete_snapshot")
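For context, read_config() only consults [global] while an attribute still holds its class default, so a per-job setting parsed by backup_generic.read_config() wins over the global one. A hypothetical ini fragment (not from the repository):
# Hypothetical configtest.ini fragment, for illustration only.
#
#   [global]
#   start_vm = yes
#   max_copies = 3
#
# After read_config(), start_vm == "yes" and max_copies == 3, while
# delete_snapshot keeps its default "yes" unless [global] overrides it.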
def copy_vm_to_sr(self, vm_name, storage_name, dry_run, delete_snapshot="yes"):
user_xen, password_xen, null = open(self.password_file).read().split('\n')
session = XenAPI.Session('https://'+self.server_name)
try:
session.login_with_password(user_xen,password_xen)
except XenAPI.Failure as error:
msg,ip = error.details
user_xen, password_xen, null = open(self.password_file).read().split("\n")
session = XenAPI.Session("https://" + self.server_name)
try:
session.login_with_password(user_xen, password_xen)
except XenAPI.Failure as error:
msg, ip = error.details
if msg == 'HOST_IS_SLAVE':
server_name = ip
session = XenAPI.Session('https://'+server_name)
session.login_with_password(user_xen,password_xen)
if msg == "HOST_IS_SLAVE":
server_name = ip
session = XenAPI.Session("https://" + server_name)
session.login_with_password(user_xen, password_xen)
self.logger.debug("[%s] VM (%s) to backup in storage: %s", self.backup_name, vm_name, storage_name)
now = datetime.datetime.now()
self.logger.debug("[%s] VM (%s) to backup in storage: %s",self.backup_name,vm_name,storage_name)
now = datetime.datetime.now()
#get storage opaqueRef
try:
storage = session.xenapi.SR.get_by_name_label(storage_name)[0]
except IndexError as error:
result = (1,"error get SR opaqueref %s"%(error))
return result
#get vm to copy opaqueRef
try:
vm = session.xenapi.VM.get_by_name_label(vm_name)[0]
except IndexError as error:
result = (1,"error get VM opaqueref %s"%(error))
return result
# get vm backup network opaqueRef
try:
networkRef = session.xenapi.network.get_by_name_label(self.network_name)[0]
except IndexError as error:
result = (1, "error get VM network opaqueref %s" % (error))
return result
if str2bool(self.halt_vm):
status_vm = session.xenapi.VM.get_power_state(vm)
self.logger.debug("[%s] Status of VM: %s",self.backup_name,status_vm)
if status_vm == "Running":
self.logger.debug("[%s] Shutdown in progress",self.backup_name)
if dry_run:
print("session.xenapi.VM.clean_shutdown(vm)")
else:
session.xenapi.VM.clean_shutdown(vm)
snapshot = vm
else:
#do the snapshot
self.logger.debug("[%s] Snapshot in progress",self.backup_name)
try:
snapshot = session.xenapi.VM.snapshot(vm,"tisbackup-%s"%(vm_name))
except XenAPI.Failure as error:
result = (1,"error when snapshot %s"%(error))
return result
#get snapshot opaqueRef
snapshot = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vm_name))[0]
session.xenapi.VM.set_name_description(snapshot,"snapshot created by tisbackup on : %s"%(now.strftime("%Y-%m-%d %H:%M")))
vm_backup_name = "zzz-%s-"%(vm_name)
#Check if old backups exist
list_backups = []
for vm_ref in session.xenapi.VM.get_all():
name_label = session.xenapi.VM.get_name_label(vm_ref)
if vm_backup_name in name_label:
list_backups.append(name_label)
list_backups.sort()
if len(list_backups) >= 1:
# Shutting last backup if started
last_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[-1])[0]
if not "Halted" in session.xenapi.VM.get_power_state(last_backup_vm):
self.logger.debug("[%s] Shutting down last backup vm : %s", self.backup_name, list_backups[-1] )
session.xenapi.VM.hard_shutdown(last_backup_vm)
# Delete oldest backups if they exist
if len(list_backups) >= int(self.max_copies):
for i in range(len(list_backups)-int(self.max_copies)+1):
oldest_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[i])[0]
if not "Halted" in session.xenapi.VM.get_power_state(oldest_backup_vm):
self.logger.debug("[%s] Shutting down old vm : %s", self.backup_name, list_backups[i] )
session.xenapi.VM.hard_shutdown(oldest_backup_vm)
try:
self.logger.debug("[%s] Deleting old vm : %s", self.backup_name, list_backups[i])
for vbd in session.xenapi.VM.get_VBDs(oldest_backup_vm):
if session.xenapi.VBD.get_type(vbd) == 'CD'and session.xenapi.VBD.get_record(vbd)['empty'] == False:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if not 'NULL' in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(oldest_backup_vm)
except XenAPI.Failure as error:
result = (1,"error when destroy old backup vm %s"%(error))
return result
self.logger.debug("[%s] Copy %s in progress on %s",self.backup_name,vm_name,storage_name)
try:
backup_vm = session.xenapi.VM.copy(snapshot,vm_backup_name+now.strftime("%Y-%m-%d %H:%M"),storage)
except XenAPI.Failure as error:
result = (1,"error when copy %s"%(error))
return result
# mark the copy as a regular VM, not a template
session.xenapi.VM.set_is_a_template(backup_vm,False)
#change the network of the new VM
try:
vifDestroy = session.xenapi.VM.get_VIFs(backup_vm)
except IndexError as error:
result = (1,"error get VIF opaqueref %s"%(error))
return result
for i in vifDestroy:
vifRecord = session.xenapi.VIF.get_record(i)
session.xenapi.VIF.destroy(i)
data = {'MAC': vifRecord['MAC'],
'MAC_autogenerated': False,
'MTU': vifRecord['MTU'],
'VM': backup_vm,
'current_operations': vifRecord['current_operations'],
'currently_attached': vifRecord['currently_attached'],
'device': vifRecord['device'],
'ipv4_allowed': vifRecord['ipv4_allowed'],
'ipv6_allowed': vifRecord['ipv6_allowed'],
'locking_mode': vifRecord['locking_mode'],
'network': networkRef,
'other_config': vifRecord['other_config'],
'qos_algorithm_params': vifRecord['qos_algorithm_params'],
'qos_algorithm_type': vifRecord['qos_algorithm_type'],
'qos_supported_algorithms': vifRecord['qos_supported_algorithms'],
'runtime_properties': vifRecord['runtime_properties'],
'status_code': vifRecord['status_code'],
'status_detail': vifRecord['status_detail']
}
try:
session.xenapi.VIF.create(data)
except Exception as error:
result = (1,error)
return result
if self.start_vm in ['true', '1', 't', 'y', 'yes', 'oui']:
session.xenapi.VM.start(backup_vm,False,True)
session.xenapi.VM.set_name_description(backup_vm,"snapshot created by tisbackup on : %s"%(now.strftime("%Y-%m-%d %H:%M")))
size_backup = 0
for vbd in session.xenapi.VM.get_VBDs(backup_vm):
if session.xenapi.VBD.get_type(vbd) == 'CD' and session.xenapi.VBD.get_record(vbd)['empty'] == False:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if not 'NULL' in vdi:
size_backup = size_backup + int(session.xenapi.VDI.get_record(vdi)['physical_utilisation'])
result = (0,size_backup)
if self.delete_snapshot == 'no':
return result
#Disable automatic boot
if 'auto_poweron' in session.xenapi.VM.get_other_config(backup_vm):
session.xenapi.VM.remove_from_other_config(backup_vm, "auto_poweron")
if not str2bool(self.halt_vm):
#delete the snapshot
try:
for vbd in session.xenapi.VM.get_VBDs(snapshot):
if session.xenapi.VBD.get_type(vbd) == 'CD' and session.xenapi.VBD.get_record(vbd)['empty'] == False:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if not 'NULL' in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(snapshot)
except XenAPI.Failure as error:
result = (1,"error when destroy snapshot %s"%(error))
return result
else:
if status_vm == "Running":
self.logger.debug("[%s] Starting in progress",self.backup_name)
if dry_run:
print("session.xenapi.VM.start(vm,False,True)")
else:
session.xenapi.VM.start(vm,False,True)
# get storage opaqueRef
try:
storage = session.xenapi.SR.get_by_name_label(storage_name)[0]
except IndexError as error:
result = (1, "error get SR opaqueref %s" % (error))
return result
def do_backup(self,stats):
# get vm to copy opaqueRef
try:
timestamp = int(time.time())
vm = session.xenapi.VM.get_by_name_label(vm_name)[0]
except IndexError as error:
result = (1, "error get VM opaqueref %s" % (error))
return result
# get vm backup network opaqueRef
try:
networkRef = session.xenapi.network.get_by_name_label(self.network_name)[0]
except IndexError as error:
result = (1, "error get VM network opaqueref %s" % (error))
return result
if str2bool(self.halt_vm):
status_vm = session.xenapi.VM.get_power_state(vm)
self.logger.debug("[%s] Status of VM: %s", self.backup_name, status_vm)
if status_vm == "Running":
self.logger.debug("[%s] Shutdown in progress", self.backup_name)
if dry_run:
print("session.xenapi.VM.clean_shutdown(vm)")
else:
session.xenapi.VM.clean_shutdown(vm)
snapshot = vm
else:
# do the snapshot
self.logger.debug("[%s] Snapshot in progress", self.backup_name)
try:
snapshot = session.xenapi.VM.snapshot(vm, "tisbackup-%s" % (vm_name))
except XenAPI.Failure as error:
result = (1, "error when snapshot %s" % (error))
return result
# get snapshot opaqueRef
snapshot = session.xenapi.VM.get_by_name_label("tisbackup-%s" % (vm_name))[0]
session.xenapi.VM.set_name_description(snapshot, "snapshot created by tisbackup on : %s" % (now.strftime("%Y-%m-%d %H:%M")))
vm_backup_name = "zzz-%s-" % (vm_name)
# Check if old backups exist
list_backups = []
for vm_ref in session.xenapi.VM.get_all():
name_label = session.xenapi.VM.get_name_label(vm_ref)
if vm_backup_name in name_label:
list_backups.append(name_label)
list_backups.sort()
if len(list_backups) >= 1:
# Shutting last backup if started
last_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[-1])[0]
if "Halted" not in session.xenapi.VM.get_power_state(last_backup_vm):
self.logger.debug("[%s] Shutting down last backup vm : %s", self.backup_name, list_backups[-1])
session.xenapi.VM.hard_shutdown(last_backup_vm)
# Delete oldest backups if they exist
if len(list_backups) >= int(self.max_copies):
for i in range(len(list_backups) - int(self.max_copies) + 1):
oldest_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[i])[0]
if "Halted" not in session.xenapi.VM.get_power_state(oldest_backup_vm):
self.logger.debug("[%s] Shutting down old vm : %s", self.backup_name, list_backups[i])
session.xenapi.VM.hard_shutdown(oldest_backup_vm)
try:
self.logger.debug("[%s] Deleting old vm : %s", self.backup_name, list_backups[i])
for vbd in session.xenapi.VM.get_VBDs(oldest_backup_vm):
if session.xenapi.VBD.get_type(vbd) == "CD" and not session.xenapi.VBD.get_record(vbd)["empty"]:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if "NULL" not in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(oldest_backup_vm)
except XenAPI.Failure as error:
result = (1, "error when destroy old backup vm %s" % (error))
return result
self.logger.debug("[%s] Copy %s in progress on %s", self.backup_name, vm_name, storage_name)
try:
backup_vm = session.xenapi.VM.copy(snapshot, vm_backup_name + now.strftime("%Y-%m-%d %H:%M"), storage)
except XenAPI.Failure as error:
result = (1, "error when copy %s" % (error))
return result
# mark the copy as a regular VM, not a template
session.xenapi.VM.set_is_a_template(backup_vm, False)
# change the network of the new VM
try:
vifDestroy = session.xenapi.VM.get_VIFs(backup_vm)
except IndexError as error:
result = (1, "error get VIF opaqueref %s" % (error))
return result
for i in vifDestroy:
vifRecord = session.xenapi.VIF.get_record(i)
session.xenapi.VIF.destroy(i)
data = {
"MAC": vifRecord["MAC"],
"MAC_autogenerated": False,
"MTU": vifRecord["MTU"],
"VM": backup_vm,
"current_operations": vifRecord["current_operations"],
"currently_attached": vifRecord["currently_attached"],
"device": vifRecord["device"],
"ipv4_allowed": vifRecord["ipv4_allowed"],
"ipv6_allowed": vifRecord["ipv6_allowed"],
"locking_mode": vifRecord["locking_mode"],
"network": networkRef,
"other_config": vifRecord["other_config"],
"qos_algorithm_params": vifRecord["qos_algorithm_params"],
"qos_algorithm_type": vifRecord["qos_algorithm_type"],
"qos_supported_algorithms": vifRecord["qos_supported_algorithms"],
"runtime_properties": vifRecord["runtime_properties"],
"status_code": vifRecord["status_code"],
"status_detail": vifRecord["status_detail"],
}
try:
session.xenapi.VIF.create(data)
except Exception as error:
result = (1, error)
return result
if self.start_vm in ["true", "1", "t", "y", "yes", "oui"]:
session.xenapi.VM.start(backup_vm, False, True)
session.xenapi.VM.set_name_description(backup_vm, "snapshot created by tisbackup on : %s" % (now.strftime("%Y-%m-%d %H:%M")))
size_backup = 0
for vbd in session.xenapi.VM.get_VBDs(backup_vm):
if session.xenapi.VBD.get_type(vbd) == "CD" and not session.xenapi.VBD.get_record(vbd)["empty"]:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if "NULL" not in vdi:
size_backup = size_backup + int(session.xenapi.VDI.get_record(vdi)["physical_utilisation"])
result = (0, size_backup)
if self.delete_snapshot == "no":
return result
# Disable automatic boot
if "auto_poweron" in session.xenapi.VM.get_other_config(backup_vm):
session.xenapi.VM.remove_from_other_config(backup_vm, "auto_poweron")
if not str2bool(self.halt_vm):
# delete the snapshot
try:
for vbd in session.xenapi.VM.get_VBDs(snapshot):
if session.xenapi.VBD.get_type(vbd) == "CD" and not session.xenapi.VBD.get_record(vbd)["empty"]:
session.xenapi.VBD.eject(vbd)
else:
vdi = session.xenapi.VBD.get_VDI(vbd)
if "NULL" not in vdi:
session.xenapi.VDI.destroy(vdi)
session.xenapi.VM.destroy(snapshot)
except XenAPI.Failure as error:
result = (1, "error when destroy snapshot %s" % (error))
return result
else:
if status_vm == "Running":
self.logger.debug("[%s] Starting in progress", self.backup_name)
if dry_run:
print("session.xenapi.VM.start(vm,False,True)")
else:
session.xenapi.VM.start(vm, False, True)
return result
def do_backup(self, stats):
try:
# timestamp = int(time.time())
cmd = self.copy_vm_to_sr(self.vm_name, self.storage_name, self.dry_run, delete_snapshot=self.delete_snapshot)
if cmd[0] == 0:
timeExec = int(time.time()) - timestamp
stats['log']='copy of %s to another storage OK' % (self.backup_name)
stats['status']='OK'
stats['total_files_count'] = 1
stats['total_bytes'] = cmd[1]
# timeExec = int(time.time()) - timestamp
stats["log"] = "copy of %s to an other storage OK" % (self.backup_name)
stats["status"] = "OK"
stats["total_files_count"] = 1
stats["total_bytes"] = cmd[1]
stats['backup_location'] = self.storage_name
stats["backup_location"] = self.storage_name
else:
stats['status']='ERROR'
stats['log']=cmd[1]
stats["status"] = "ERROR"
stats["log"] = cmd[1]
except BaseException as e:
stats['status']='ERROR'
stats['log']=str(e)
stats["status"] = "ERROR"
stats["log"] = str(e)
raise
def register_existingbackups(self):
"""scan backup dir and insert stats in database"""
#This backup is on target server, no data available on this server
# This backup is on target server, no data available on this server
pass
register_driver(copy_vm_xcp)

View File

@ -3,21 +3,37 @@
# Copyright (c) 2007 Tim Lauridsen <tla@rasmil.dk>
# All Rights Reserved. See LICENSE-PSF & LICENSE for details.
from .compat import ConfigParser, RawConfigParser, SafeConfigParser
from .config import BasicConfig, ConfigNamespace
from .configparser import (DEFAULTSECT, MAX_INTERPOLATION_DEPTH,
DuplicateSectionError, InterpolationDepthError,
InterpolationMissingOptionError,
InterpolationSyntaxError, NoOptionError,
NoSectionError)
from .ini import INIConfig, change_comment_syntax
from .config import BasicConfig, ConfigNamespace
from .compat import RawConfigParser, ConfigParser, SafeConfigParser
from .utils import tidy
from .configparser import (
DuplicateSectionError,
NoSectionError,
NoOptionError,
InterpolationMissingOptionError,
InterpolationDepthError,
InterpolationSyntaxError,
DEFAULTSECT,
MAX_INTERPOLATION_DEPTH,
)
__all__ = [
'BasicConfig', 'ConfigNamespace',
'INIConfig', 'tidy', 'change_comment_syntax',
'RawConfigParser', 'ConfigParser', 'SafeConfigParser',
'DuplicateSectionError', 'NoSectionError', 'NoOptionError',
'InterpolationMissingOptionError', 'InterpolationDepthError',
'InterpolationSyntaxError', 'DEFAULTSECT', 'MAX_INTERPOLATION_DEPTH',
"BasicConfig",
"ConfigNamespace",
"INIConfig",
"tidy",
"change_comment_syntax",
"RawConfigParser",
"ConfigParser",
"SafeConfigParser",
"DuplicateSectionError",
"NoSectionError",
"NoOptionError",
"InterpolationMissingOptionError",
"InterpolationDepthError",
"InterpolationSyntaxError",
"DEFAULTSECT",
"MAX_INTERPOLATION_DEPTH",
]

View File

@ -12,41 +12,48 @@ The underlying INIConfig object can be accessed as cfg.data
"""
import re
from typing import Dict, List, TextIO, Optional, Type, Union, Tuple
import six
from .configparser import (
DuplicateSectionError,
NoSectionError,
NoOptionError,
InterpolationMissingOptionError,
InterpolationDepthError,
InterpolationSyntaxError,
DEFAULTSECT,
MAX_INTERPOLATION_DEPTH,
)
# These are imported only for compatibility.
# The code below does not reference them directly.
from .configparser import Error, InterpolationError, MissingSectionHeaderError, ParsingError
from . import ini
# These are imported only for compatiability.
# The code below does not reference them directly.
from .configparser import (DEFAULTSECT, MAX_INTERPOLATION_DEPTH,
DuplicateSectionError, Error,
InterpolationDepthError, InterpolationError,
InterpolationMissingOptionError,
InterpolationSyntaxError, MissingSectionHeaderError,
NoOptionError, NoSectionError, ParsingError)
class RawConfigParser(object):
def __init__(self, defaults=None, dict_type=dict):
if dict_type != dict:
raise ValueError('Custom dict types not supported')
class RawConfigParser:
def __init__(self, defaults: Optional[Dict[str, str]] = None, dict_type: Type[Dict] = dict):
if dict_type is not dict:
raise ValueError("Custom dict types not supported")
self.data = ini.INIConfig(defaults=defaults, optionxformsource=self)
def optionxform(self, optionstr):
def optionxform(self, optionstr: str) -> str:
return optionstr.lower()
def defaults(self):
d = {}
secobj = self.data._defaults
def defaults(self) -> Dict[str, str]:
d: Dict[str, str] = {}
secobj: ini.INISection = self.data._defaults
name: str
for name in secobj._options:
d[name] = secobj._compat_get(name)
return d
def sections(self):
def sections(self) -> List[str]:
"""Return a list of section names, excluding [DEFAULT]"""
return list(self.data)
def add_section(self, section):
def add_section(self, section: str) -> None:
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
@ -56,28 +63,28 @@ class RawConfigParser(object):
# The default section is the only one that gets the case-insensitive
# treatment - so it is special-cased here.
if section.lower() == "default":
raise ValueError('Invalid section name: %s' % section)
raise ValueError("Invalid section name: %s" % section)
if self.has_section(section):
raise DuplicateSectionError(section)
else:
self.data._new_namespace(section)
def has_section(self, section):
def has_section(self, section: str) -> bool:
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self.data
def options(self, section):
def options(self, section: str) -> List[str]:
"""Return a list of option names for the given section name."""
if section in self.data:
return list(self.data[section])
else:
raise NoSectionError(section)
def read(self, filenames):
def read(self, filenames: Union[List[str], str]) -> List[str]:
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
@ -86,9 +93,11 @@ class RawConfigParser(object):
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Returns the list of files that were read.
"""
files_read = []
if isinstance(filenames, six.string_types):
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
try:
@ -100,7 +109,7 @@ class RawConfigParser(object):
fp.close()
return files_read
def readfp(self, fp, filename=None):
def readfp(self, fp: TextIO, filename: Optional[str] = None) -> None:
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
@ -110,60 +119,70 @@ class RawConfigParser(object):
"""
self.data._readfp(fp)
def get(self, section, option, vars=None):
def get(self, section: str, option: str, vars: dict = None) -> str:
if not self.has_section(section):
raise NoSectionError(section)
sec = self.data[section]
sec: ini.INISection = self.data[section]
if option in sec:
return sec._compat_get(option)
else:
raise NoOptionError(option, section)
def items(self, section):
def items(self, section: str) -> List[Tuple[str, str]]:
if section in self.data:
ans = []
opt: str
for opt in self.data[section]:
ans.append((opt, self.get(section, opt)))
return ans
else:
raise NoSectionError(section)
def getint(self, section, option):
def getint(self, section: str, option: str) -> int:
return int(self.get(section, option))
def getfloat(self, section, option):
def getfloat(self, section: str, option: str) -> float:
return float(self.get(section, option))
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
_boolean_states = {
"1": True,
"yes": True,
"true": True,
"on": True,
"0": False,
"no": False,
"false": False,
"off": False,
}
def getboolean(self, section, option):
def getboolean(self, section: str, option: str) -> bool:
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError('Not a boolean: %s' % v)
raise ValueError("Not a boolean: %s" % v)
return self._boolean_states[v.lower()]
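The table above drives getboolean(): the option is fetched as a string, lowercased, and looked up, so anything outside these eight values raises. A short sketch (section and option hypothetical, behavior per the methods shown here):
cp = RawConfigParser()
cp.add_section("main")
cp.set("main", "debug", "Yes")
print(cp.getboolean("main", "debug"))   # True: "yes" is in _boolean_states
cp.set("main", "debug", "maybe")
# cp.getboolean("main", "debug")        # would raise ValueError: Not a boolean: maybe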
def has_option(self, section, option):
def has_option(self, section: str, option: str) -> bool:
"""Check for the existence of a given option in a given section."""
if section in self.data:
sec = self.data[section]
else:
raise NoSectionError(section)
return (option in sec)
return option in sec
def set(self, section, option, value):
def set(self, section: str, option: str, value: str) -> None:
"""Set an option."""
if section in self.data:
self.data[section][option] = value
else:
raise NoSectionError(section)
def write(self, fp):
def write(self, fp: TextIO) -> None:
"""Write an .ini-format representation of the configuration state."""
fp.write(str(self.data))
def remove_option(self, section, option):
# FIXME Return a boolean instead of integer
def remove_option(self, section: str, option: str) -> int:
"""Remove an option."""
if section in self.data:
sec = self.data[section]
@ -175,7 +194,7 @@ class RawConfigParser(object):
else:
return 0
def remove_section(self, section):
def remove_section(self, section: str) -> bool:
"""Remove a file section."""
if not self.has_section(section):
return False
@ -183,15 +202,15 @@ class RawConfigParser(object):
return True
class ConfigDict(object):
"""Present a dict interface to a ini section."""
class ConfigDict:
"""Present a dict interface to an ini section."""
def __init__(self, cfg, section, vars):
self.cfg = cfg
self.section = section
self.vars = vars
def __init__(self, cfg: RawConfigParser, section: str, vars: dict):
self.cfg: RawConfigParser = cfg
self.section: str = section
self.vars: dict = vars
def __getitem__(self, key):
def __getitem__(self, key: str) -> Union[str, List[Union[int, str]]]:
try:
return RawConfigParser.get(self.cfg, self.section, key, self.vars)
except (NoOptionError, NoSectionError):
@ -199,8 +218,13 @@ class ConfigDict(object):
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
def get(
self,
section: str,
option: str,
raw: bool = False,
vars: Optional[dict] = None,
) -> object:
"""Get an option value for a given section.
All % interpolations are expanded in the return values, based on the
@ -223,25 +247,24 @@ class ConfigParser(RawConfigParser):
d = ConfigDict(self, section, vars)
return self._interpolate(section, option, value, d)
def _interpolate(self, section, option, rawval, vars):
def _interpolate(self, section: str, option: str, rawval: object, vars: "ConfigDict"):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
while depth: # Loop through this until it's done
depth -= 1
if "%(" in value:
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
raise InterpolationMissingOptionError(option, section, rawval, e.args[0])
else:
break
if value.find("%(") != -1:
raise InterpolationDepthError(option, section, rawval)
return value
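The loop above re-applies %-interpolation until no %( marker remains, giving up after MAX_INTERPOLATION_DEPTH passes; a hypothetical two-pass expansion:
# Hypothetical values, for illustration only.
vars = {"host": "db01", "url": "postgres://%(host)s/app"}
value = "%(url)s"
value = value % vars   # pass 1 -> 'postgres://%(host)s/app'
value = value % vars   # pass 2 -> 'postgres://db01/app'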
def items(self, section, raw=False, vars=None):
def items(self, section: str, raw: bool = False, vars: Optional[dict] = None):
"""Return a list of tuples with (name, value) for each option
in the section.
@ -269,40 +292,37 @@ class ConfigParser(RawConfigParser):
d = ConfigDict(self, section, vars)
if raw:
return [(option, d[option])
for option in options]
return [(option, d[option]) for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
return [(option, self._interpolate(section, option, d[option], d)) for option in options]
class SafeConfigParser(ConfigParser):
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
_badpercent_re = re.compile(r"%[^%]|%$")
def set(self, section, option, value):
if not isinstance(value, six.string_types):
def set(self, section: str, option: str, value: object) -> None:
if not isinstance(value, str):
raise TypeError("option values must be strings")
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = self._interpvar_re.sub('', value)
tmp_value = self._interpvar_re.sub("", value)
# then, check if there's a lone percent sign left
m = self._badpercent_re.search(tmp_value)
if m:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, m.start()))
raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, m.start()))
ConfigParser.set(self, section, option, value)
def _interpolate(self, section, option, rawval, vars):
def _interpolate(self, section: str, option: str, rawval: str, vars: ConfigDict):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
return "".join(L)
_interpvar_match = re.compile(r"%\(([^)]+)\)s").match
def _interpolate_some(self, option, accum, rest, section, map, depth):
def _interpolate_some(self, option: str, accum: List[str], rest: str, section: str, map: ConfigDict, depth: int) -> None:
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
@ -323,18 +343,14 @@ class SafeConfigParser(ConfigParser):
if m is None:
raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest)
var = m.group(1)
rest = rest[m.end():]
rest = rest[m.end() :]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
raise InterpolationMissingOptionError(option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
self._interpolate_some(option, accum, v, section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%' must be followed by '%' or '(', found: " + repr(rest))
raise InterpolationSyntaxError(option, section, "'%' must be followed by '%' or '(', found: " + repr(rest))

View File

@ -1,4 +1,10 @@
class ConfigNamespace(object):
from typing import Dict, Iterable, List, TextIO, Union, TYPE_CHECKING
if TYPE_CHECKING:
from .ini import INIConfig, INISection
class ConfigNamespace:
"""Abstract class representing the interface of Config objects.
A ConfigNamespace is a collection of names mapped to values, where
@ -12,27 +18,27 @@ class ConfigNamespace(object):
Subclasses must implement the methods for container-like access,
and this class will automatically provide dotted access.
"""
# Methods that must be implemented by subclasses
def _getitem(self, key):
def _getitem(self, key: str) -> object:
return NotImplementedError(key)
def __setitem__(self, key, value):
def __setitem__(self, key: str, value: object):
raise NotImplementedError(key, value)
def __delitem__(self, key):
def __delitem__(self, key: str) -> None:
raise NotImplementedError(key)
def __iter__(self):
def __iter__(self) -> Iterable[str]:
# FIXME Raise instead of returning
return NotImplementedError()
def _new_namespace(self, name):
def _new_namespace(self, name: str) -> "ConfigNamespace":
raise NotImplementedError(name)
def __contains__(self, key):
def __contains__(self, key: str) -> bool:
try:
self._getitem(key)
except KeyError:
@ -44,35 +50,35 @@ class ConfigNamespace(object):
#
# To distinguish between accesses of class members and namespace
# keys, we first call object.__getattribute__(). If that succeeds,
# the name is assumed to be a class member. Otherwise it is
# the name is assumed to be a class member. Otherwise, it is
# treated as a namespace key.
#
# Therefore, member variables should be defined in the class,
# not just in the __init__() function. See BasicNamespace for
# an example.
def __getitem__(self, key):
def __getitem__(self, key: str) -> Union[object, "Undefined"]:
try:
return self._getitem(key)
except KeyError:
return Undefined(key, self)
def __getattr__(self, name):
def __getattr__(self, name: str) -> Union[object, "Undefined"]:
try:
return self._getitem(name)
except KeyError:
if name.startswith('__') and name.endswith('__'):
if name.startswith("__") and name.endswith("__"):
raise AttributeError
return Undefined(name, self)
def __setattr__(self, name, value):
def __setattr__(self, name: str, value: object) -> None:
try:
object.__getattribute__(self, name)
object.__setattr__(self, name, value)
except AttributeError:
self.__setitem__(name, value)
def __delattr__(self, name):
def __delattr__(self, name: str) -> None:
try:
object.__getattribute__(self, name)
object.__delattr__(self, name)
@ -82,12 +88,12 @@ class ConfigNamespace(object):
# During unpickling, Python checks if the class has a __setstate__
# method. But, the data dicts have not been initialised yet, which
# leads to _getitem and hence __getattr__ raising an exception. So
# we explicitly impement default __setstate__ behavior.
def __setstate__(self, state):
# we explicitly implement default __setstate__ behavior.
def __setstate__(self, state: dict) -> None:
self.__dict__.update(state)
class Undefined(object):
class Undefined:
"""Helper class used to hold undefined names until assignment.
This class helps create any undefined subsections when an
@ -95,21 +101,24 @@ class Undefined(object):
statement is "cfg.a.b.c = 42", but "cfg.a.b" does not exist yet.
"""
def __init__(self, name, namespace):
object.__setattr__(self, 'name', name)
object.__setattr__(self, 'namespace', namespace)
def __init__(self, name: str, namespace: ConfigNamespace):
# FIXME These assignments into `object` feel very strange.
# What's the reason for them?
object.__setattr__(self, "name", name)
object.__setattr__(self, "namespace", namespace)
def __setattr__(self, name, value):
def __setattr__(self, name: str, value: object) -> None:
obj = self.namespace._new_namespace(self.name)
obj[name] = value
def __setitem__(self, name, value):
def __setitem__(self, name, value) -> None:
obj = self.namespace._new_namespace(self.name)
obj[name] = value
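How Undefined behaves in practice, sketched with the BasicConfig namespace defined just below (names are illustrative): reading a missing key returns a placeholder that creates nothing, and the first assignment through it materializes the namespace.

cfg = BasicConfig()
placeholder = cfg.ui    # Undefined; no namespace has been created yet
print("ui" in cfg)      # -> False
cfg.ui.height = 100     # assignment through Undefined creates "ui"
print("ui" in cfg)      # -> True
print(cfg.ui.height)    # -> 100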
# ---- Basic implementation of a ConfigNamespace
class BasicConfig(ConfigNamespace):
"""Represents a hierarchical collection of named values.
@ -161,7 +170,7 @@ class BasicConfig(ConfigNamespace):
Finally, values can be read from a file as follows:
>>> from six import StringIO
>>> from io import StringIO
>>> sio = StringIO('''
... # comment
... ui.height = 100
@ -181,66 +190,73 @@ class BasicConfig(ConfigNamespace):
"""
# this makes sure that __setattr__ knows this is not a namespace key
_data = None
_data: Dict[str, str] = None
def __init__(self):
self._data = {}
def _getitem(self, key):
def _getitem(self, key: str) -> str:
return self._data[key]
def __setitem__(self, key, value):
def __setitem__(self, key: str, value: object) -> None:
# FIXME We can add any object as 'value', but when an integer is read
# from a file, it will be a string. Should we explicitly convert
# this 'value' to string, to ensure consistency?
# It will stay the original type until it is written to a file.
self._data[key] = value
def __delitem__(self, key):
def __delitem__(self, key: str) -> None:
del self._data[key]
def __iter__(self):
def __iter__(self) -> Iterable[str]:
return iter(self._data)
def __str__(self, prefix=''):
lines = []
keys = list(self._data.keys())
def __str__(self, prefix: str = "") -> str:
lines: List[str] = []
keys: List[str] = list(self._data.keys())
keys.sort()
for name in keys:
value = self._data[name]
value: object = self._data[name]
if isinstance(value, ConfigNamespace):
lines.append(value.__str__(prefix='%s%s.' % (prefix,name)))
lines.append(value.__str__(prefix="%s%s." % (prefix, name)))
else:
if value is None:
lines.append('%s%s' % (prefix, name))
lines.append("%s%s" % (prefix, name))
else:
lines.append('%s%s = %s' % (prefix, name, value))
return '\n'.join(lines)
lines.append("%s%s = %s" % (prefix, name, value))
return "\n".join(lines)
def _new_namespace(self, name):
def _new_namespace(self, name: str) -> "BasicConfig":
obj = BasicConfig()
self._data[name] = obj
return obj
def _readfp(self, fp):
def _readfp(self, fp: TextIO) -> None:
while True:
line = fp.readline()
line: str = fp.readline()
if not line:
break
line = line.strip()
if not line: continue
if line[0] == '#': continue
data = line.split('=', 1)
if not line:
continue
if line[0] == "#":
continue
data: List[str] = line.split("=", 1)
if len(data) == 1:
name = line
value = None
else:
name = data[0].strip()
value = data[1].strip()
name_components = name.split('.')
ns = self
name_components = name.split(".")
ns: ConfigNamespace = self
for n in name_components[:-1]:
if n in ns:
ns = ns[n]
if not isinstance(ns, ConfigNamespace):
raise TypeError('value-namespace conflict', n)
maybe_ns: object = ns[n]
if not isinstance(maybe_ns, ConfigNamespace):
raise TypeError("value-namespace conflict", n)
ns = maybe_ns
else:
ns = ns._new_namespace(n)
ns[name_components[-1]] = value
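A small sketch of the reader above (it is the private _readfp used by the docstring's doctest): dotted names create nested namespaces on demand, and a line without "=" stores None.

from io import StringIO

cfg = BasicConfig()
cfg._readfp(StringIO("# comment\nui.height = 100\nui.width = 150\nverbose\n"))
print(cfg.ui.height)   # -> 100 (kept as the string "100")
print(cfg.verbose)     # -> None (bare name, no '=')
print(cfg)             # sorted dotted output, one "name = value" per line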
@ -248,7 +264,8 @@ class BasicConfig(ConfigNamespace):
# ---- Utility functions
def update_config(target, source):
def update_config(target: ConfigNamespace, source: ConfigNamespace):
"""Imports values from source into target.
Recursively walks the <source> ConfigNamespace and inserts values
@ -276,15 +293,15 @@ def update_config(target, source):
display_clock = True
display_qlength = True
width = 150
"""
for name in sorted(source):
value = source[name]
value: object = source[name]
if isinstance(value, ConfigNamespace):
if name in target:
myns = target[name]
if not isinstance(myns, ConfigNamespace):
raise TypeError('value-namespace conflict')
maybe_myns: object = target[name]
if not isinstance(maybe_myns, ConfigNamespace):
raise TypeError("value-namespace conflict")
myns = maybe_myns
else:
myns = target._new_namespace(name)
update_config(myns, value)
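Usage sketch for update_config() with illustrative values: source entries overwrite scalars in target but merge into existing sub-namespaces instead of replacing them wholesale.

target = BasicConfig()
target.ui.display_clock = True
target.ui.width = 100

source = BasicConfig()
source.ui.width = 150
source.playlist.expand_playlist = True

update_config(target, source)
print(target.ui.display_clock)          # -> True (untouched)
print(target.ui.width)                  # -> 150 (overwritten)
print(target.playlist.expand_playlist)  # -> True (new namespace merged in)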

View File

@ -1,7 +1,2 @@
try:
# not all objects get imported with __all__
from ConfigParser import *
from ConfigParser import Error, InterpolationMissingOptionError
except ImportError:
from configparser import *
from configparser import Error, InterpolationMissingOptionError
from configparser import *
from configparser import Error, InterpolationMissingOptionError

View File

@ -7,7 +7,7 @@
Example:
>>> from six import StringIO
>>> from io import StringIO
>>> sio = StringIO('''# configure foo-application
... [foo]
... bar1 = qualia
@ -39,26 +39,31 @@ Example:
# An ini parser that supports ordered sections/options
# Also supports updates, while preserving structure
# Backward-compatiable with ConfigParser
# Backward-compatible with ConfigParser
import re
import six
from typing import Any, Callable, Dict, TextIO, Iterator, List, Optional, Set, Union
from typing import TYPE_CHECKING
from .configparser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
from . import config
from .configparser import DEFAULTSECT, MissingSectionHeaderError, ParsingError
if TYPE_CHECKING:
from compat import RawConfigParser
class LineType(object):
line = None
class LineType:
line: Optional[str] = None
def __init__(self, line=None):
def __init__(self, line: Optional[str] = None) -> None:
if line is not None:
self.line = line.strip('\n')
self.line = line.strip("\n")
# Return the original line for unmodified objects
# Otherwise construct using the current attribute values
def __str__(self):
def __str__(self) -> str:
if self.line is not None:
return self.line
else:
@ -66,78 +71,87 @@ class LineType(object):
# If an attribute is modified after initialization
# set line to None since it is no longer accurate.
def __setattr__(self, name, value):
if hasattr(self,name):
self.__dict__['line'] = None
def __setattr__(self, name: str, value: object) -> None:
if hasattr(self, name):
self.__dict__["line"] = None
self.__dict__[name] = value
def to_string(self):
raise Exception('This method must be overridden in derived classes')
def to_string(self) -> str:
# FIXME Raise NotImplementedError instead
raise Exception("This method must be overridden in derived classes")
class SectionLine(LineType):
regex = re.compile(r'^\['
r'(?P<name>[^]]+)'
r'\]\s*'
r'((?P<csep>;|#)(?P<comment>.*))?$')
regex = re.compile(r"^\[" r"(?P<name>[^]]+)" r"\]\s*" r"((?P<csep>;|#)(?P<comment>.*))?$")
def __init__(self, name, comment=None, comment_separator=None,
comment_offset=-1, line=None):
super(SectionLine, self).__init__(line)
self.name = name
self.comment = comment
self.comment_separator = comment_separator
self.comment_offset = comment_offset
def __init__(
self,
name: str,
comment: Optional[str] = None,
comment_separator: Optional[str] = None,
comment_offset: int = -1,
line: Optional[str] = None,
) -> None:
super().__init__(line)
self.name: str = name
self.comment: Optional[str] = comment
self.comment_separator: Optional[str] = comment_separator
self.comment_offset: int = comment_offset
def to_string(self):
out = '[' + self.name + ']'
def to_string(self) -> str:
out: str = "[" + self.name + "]"
if self.comment is not None:
# try to preserve indentation of comments
out = (out+' ').ljust(self.comment_offset)
out = (out + " ").ljust(self.comment_offset)
out = out + self.comment_separator + self.comment
return out
def parse(cls, line):
m = cls.regex.match(line.rstrip())
@classmethod
def parse(cls, line: str) -> Optional["SectionLine"]:
m: Optional[re.Match] = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('name'), m.group('comment'),
m.group('csep'), m.start('csep'),
line)
parse = classmethod(parse)
return cls(m.group("name"), m.group("comment"), m.group("csep"), m.start("csep"), line)
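Parsing sketch for SectionLine (illustrative input, assuming the line matches): the comment offset is captured so a rebuilt line keeps its original alignment.

sec = SectionLine.parse("[backup_srv1]   ; nightly job")
print(sec.name)           # -> backup_srv1
print(repr(sec.comment))  # -> ' nightly job'
print(sec.to_string())    # -> [backup_srv1]   ; nightly job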
class OptionLine(LineType):
def __init__(self, name, value, separator=' = ', comment=None,
comment_separator=None, comment_offset=-1, line=None):
super(OptionLine, self).__init__(line)
self.name = name
self.value = value
self.separator = separator
self.comment = comment
self.comment_separator = comment_separator
self.comment_offset = comment_offset
def __init__(
self,
name: str,
value: object,
separator: str = " = ",
comment: Optional[str] = None,
comment_separator: Optional[str] = None,
comment_offset: int = -1,
line: Optional[str] = None,
) -> None:
super().__init__(line)
self.name: str = name
self.value: object = value
self.separator: str = separator
self.comment: Optional[str] = comment
self.comment_separator: Optional[str] = comment_separator
self.comment_offset: int = comment_offset
def to_string(self):
out = '%s%s%s' % (self.name, self.separator, self.value)
def to_string(self) -> str:
out: str = "%s%s%s" % (self.name, self.separator, self.value)
if self.comment is not None:
# try to preserve indentation of comments
out = (out+' ').ljust(self.comment_offset)
out = (out + " ").ljust(self.comment_offset)
out = out + self.comment_separator + self.comment
return out
regex = re.compile(r'^(?P<name>[^:=\s[][^:=]*)'
r'(?P<sep>[:=]\s*)'
r'(?P<value>.*)$')
regex = re.compile(r"^(?P<name>[^:=\s[][^:=]*)" r"(?P<sep>[:=]\s*)" r"(?P<value>.*)$")
def parse(cls, line):
m = cls.regex.match(line.rstrip())
@classmethod
def parse(cls, line: str) -> Optional["OptionLine"]:
m: Optional[re.Match] = cls.regex.match(line.rstrip())
if m is None:
return None
name = m.group('name').rstrip()
value = m.group('value')
sep = m.group('name')[len(name):] + m.group('sep')
name: str = m.group("name").rstrip()
value: str = m.group("value")
sep: str = m.group("name")[len(name) :] + m.group("sep")
# comments are not detected in the regex because
# ensuring total compatibility with ConfigParser
@ -150,123 +164,120 @@ class OptionLine(LineType):
# include ';' in the value needs to be addressed.
# Also, '#' doesn't mark comments in options...
coff = value.find(';')
if coff != -1 and value[coff-1].isspace():
comment = value[coff+1:]
coff: int = value.find(";")
if coff != -1 and value[coff - 1].isspace():
comment = value[coff + 1 :]
csep = value[coff]
value = value[:coff].rstrip()
coff = m.start('value') + coff
coff = m.start("value") + coff
else:
comment = None
csep = None
coff = -1
return cls(name, value, sep, comment, csep, coff, line)
parse = classmethod(parse)
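Illustration of the rule described above: ';' opens a comment only when preceded by whitespace, so semicolons embedded in values survive.

opt = OptionLine.parse("interval = 24 ; hours")
print(repr(opt.value), repr(opt.comment))  # -> '24' ' hours'
keep = OptionLine.parse("path = /srv;backups")
print(repr(keep.value))                    # -> '/srv;backups' (no comment split)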
def change_comment_syntax(comment_chars='%;#', allow_rem=False):
comment_chars = re.sub(r'([\]\-\^])', r'\\\1', comment_chars)
regex = r'^(?P<csep>[%s]' % comment_chars
def change_comment_syntax(comment_chars: str = "%;#", allow_rem: bool = False) -> None:
comment_chars: str = re.sub(r"([\]\-\^])", r"\\\1", comment_chars)
regex: str = r"^(?P<csep>[%s]" % comment_chars
if allow_rem:
regex += '|[rR][eE][mM]'
regex += r')(?P<comment>.*)$'
regex += "|[rR][eE][mM]"
regex += r")(?P<comment>.*)$"
CommentLine.regex = re.compile(regex)
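Sketch of change_comment_syntax() with hypothetical inputs: after widening the syntax, '%' and REM lines parse as comments.

change_comment_syntax(comment_chars="%;#", allow_rem=True)
print(CommentLine.parse("% legacy percent comment").comment)
print(CommentLine.parse("REM dos-style comment").comment)
print(CommentLine.parse("plain = value"))  # -> None (not a comment line)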
class CommentLine(LineType):
regex = re.compile(r'^(?P<csep>[;#])'
r'(?P<comment>.*)$')
regex: re.Pattern = re.compile(r"^(?P<csep>[;#]|[rR][eE][mM])" r"(?P<comment>.*)$")
def __init__(self, comment='', separator='#', line=None):
super(CommentLine, self).__init__(line)
self.comment = comment
self.separator = separator
def __init__(self, comment: str = "", separator: str = "#", line: Optional[str] = None) -> None:
super().__init__(line)
self.comment: str = comment
self.separator: str = separator
def to_string(self):
def to_string(self) -> str:
return self.separator + self.comment
def parse(cls, line):
m = cls.regex.match(line.rstrip())
@classmethod
def parse(cls, line: str) -> Optional["CommentLine"]:
m: Optional[re.Match] = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('comment'), m.group('csep'), line)
parse = classmethod(parse)
return cls(m.group("comment"), m.group("csep"), line)
class EmptyLine(LineType):
# could make this a singleton
def to_string(self):
return ''
def to_string(self) -> str:
return ""
value = property(lambda self: '')
value = property(lambda self: "")
def parse(cls, line):
@classmethod
def parse(cls, line: str) -> Optional["EmptyLine"]:
if line.strip():
return None
return cls(line)
parse = classmethod(parse)
class ContinuationLine(LineType):
regex = re.compile(r'^\s+(?P<value>.*)$')
regex: re.Pattern = re.compile(r"^\s+(?P<value>.*)$")
def __init__(self, value, value_offset=None, line=None):
super(ContinuationLine, self).__init__(line)
def __init__(self, value: str, value_offset: Optional[int] = None, line: Optional[str] = None) -> None:
super().__init__(line)
self.value = value
if value_offset is None:
value_offset = 8
self.value_offset = value_offset
self.value_offset: int = value_offset
def to_string(self):
return ' '*self.value_offset + self.value
def to_string(self) -> str:
return " " * self.value_offset + self.value
def parse(cls, line):
m = cls.regex.match(line.rstrip())
@classmethod
def parse(cls, line: str) -> Optional["ContinuationLine"]:
m: Optional[re.Match] = cls.regex.match(line.rstrip())
if m is None:
return None
return cls(m.group('value'), m.start('value'), line)
parse = classmethod(parse)
return cls(m.group("value"), m.start("value"), line)
class LineContainer(object):
def __init__(self, d=None):
class LineContainer:
def __init__(self, d: Optional[Union[List[LineType], LineType]] = None) -> None:
self.contents = []
self.orgvalue = None
self.orgvalue: str = None
if d:
if isinstance(d, list): self.extend(d)
else: self.add(d)
if isinstance(d, list):
self.extend(d)
else:
self.add(d)
def add(self, x):
def add(self, x: LineType) -> None:
self.contents.append(x)
def extend(self, x):
for i in x: self.add(i)
def extend(self, x: List[LineType]) -> None:
for i in x:
self.add(i)
def get_name(self):
def get_name(self) -> str:
return self.contents[0].name
def set_name(self, data):
def set_name(self, data: str) -> None:
self.contents[0].name = data
def get_value(self):
def get_value(self) -> str:
if self.orgvalue is not None:
return self.orgvalue
elif len(self.contents) == 1:
return self.contents[0].value
else:
return '\n'.join([('%s' % x.value) for x in self.contents
if not isinstance(x, CommentLine)])
return "\n".join([("%s" % x.value) for x in self.contents if not isinstance(x, CommentLine)])
def set_value(self, data):
def set_value(self, data: object) -> None:
self.orgvalue = data
lines = ('%s' % data).split('\n')
lines: List[str] = ("%s" % data).split("\n")
# If there is an existing ContinuationLine, use its offset
value_offset = None
value_offset: Optional[int] = None
for v in self.contents:
if isinstance(v, ContinuationLine):
value_offset = v.value_offset
@ -282,40 +293,45 @@ class LineContainer(object):
else:
self.add(EmptyLine())
def get_line_number(self) -> Optional[int]:
return self.contents[0].line_number if self.contents else None
name = property(get_name, set_name)
value = property(get_value, set_value)
def __str__(self):
s = [x.__str__() for x in self.contents]
return '\n'.join(s)
line_number = property(get_line_number)
def finditer(self, key):
def __str__(self) -> str:
s: List[str] = [x.__str__() for x in self.contents]
return "\n".join(s)
def finditer(self, key: str) -> Iterator[Union[SectionLine, OptionLine]]:
for x in self.contents[::-1]:
if hasattr(x, 'name') and x.name==key:
if hasattr(x, "name") and x.name == key:
yield x
def find(self, key):
def find(self, key: str) -> Union[SectionLine, OptionLine]:
for x in self.finditer(key):
return x
raise KeyError(key)
def _make_xform_property(myattrname, srcattrname=None):
private_attrname = myattrname + 'value'
private_srcname = myattrname + 'source'
def _make_xform_property(myattrname: str, srcattrname: Optional[str] = None) -> property:
private_attrname: str = myattrname + "value"
private_srcname: str = myattrname + "source"
if srcattrname is None:
srcattrname = myattrname
def getfn(self):
srcobj = getattr(self, private_srcname)
def getfn(self) -> Callable:
srcobj: Optional[object] = getattr(self, private_srcname)
if srcobj is not None:
return getattr(srcobj, srcattrname)
else:
return getattr(self, private_attrname)
def setfn(self, value):
srcobj = getattr(self, private_srcname)
def setfn(self, value: Callable) -> None:
srcobj: Optional[object] = getattr(self, private_srcname)
if srcobj is not None:
setattr(srcobj, srcattrname, value)
else:
@ -325,31 +341,38 @@ def _make_xform_property(myattrname, srcattrname=None):
class INISection(config.ConfigNamespace):
_lines = None
_options = None
_defaults = None
_optionxformvalue = None
_optionxformsource = None
_compat_skip_empty_lines = set()
_lines: List[LineContainer] = None
_options: Dict[str, object] = None
_defaults: Optional["INISection"] = None
_optionxformvalue: "INIConfig" = None
_optionxformsource: "INIConfig" = None
_compat_skip_empty_lines: Set[str] = set()
def __init__(self, lineobj, defaults=None, optionxformvalue=None, optionxformsource=None):
def __init__(
self,
lineobj: LineContainer,
defaults: Optional["INISection"] = None,
optionxformvalue: Optional["INIConfig"] = None,
optionxformsource: Optional["INIConfig"] = None,
) -> None:
self._lines = [lineobj]
self._defaults = defaults
self._optionxformvalue = optionxformvalue
self._optionxformsource = optionxformsource
self._options = {}
_optionxform = _make_xform_property('_optionxform')
_optionxform = _make_xform_property("_optionxform")
def _compat_get(self, key):
def _compat_get(self, key: str) -> str:
# identical to __getitem__ except that _compat_XXX
# is checked for backward-compatible handling
if key == '__name__':
if key == "__name__":
return self._lines[-1].name
if self._optionxform: key = self._optionxform(key)
if self._optionxform:
key = self._optionxform(key)
try:
value = self._options[key].value
del_empty = key in self._compat_skip_empty_lines
value: str = self._options[key].value
del_empty: bool = key in self._compat_skip_empty_lines
except KeyError:
if self._defaults and key in self._defaults._options:
value = self._defaults._options[key].value
@ -357,13 +380,14 @@ class INISection(config.ConfigNamespace):
else:
raise
if del_empty:
value = re.sub('\n+', '\n', value)
value = re.sub("\n+", "\n", value)
return value
def _getitem(self, key):
if key == '__name__':
def _getitem(self, key: str) -> object:
if key == "__name__":
return self._lines[-1].name
if self._optionxform: key = self._optionxform(key)
if self._optionxform:
key = self._optionxform(key)
try:
return self._options[key].value
except KeyError:
@ -372,22 +396,25 @@ class INISection(config.ConfigNamespace):
else:
raise
def __setitem__(self, key, value):
if self._optionxform: xkey = self._optionxform(key)
else: xkey = key
def __setitem__(self, key: str, value: object) -> None:
if self._optionxform:
xkey = self._optionxform(key)
else:
xkey = key
if xkey in self._compat_skip_empty_lines:
self._compat_skip_empty_lines.remove(xkey)
if xkey not in self._options:
# create a dummy object - value may have multiple lines
obj = LineContainer(OptionLine(key, ''))
obj = LineContainer(OptionLine(key, ""))
self._lines[-1].add(obj)
self._options[xkey] = obj
# the set_value() function in LineContainer
# automatically handles multi-line values
self._options[xkey].value = value
def __delitem__(self, key):
if self._optionxform: key = self._optionxform(key)
def __delitem__(self, key: str) -> None:
if self._optionxform:
key = self._optionxform(key)
if key in self._compat_skip_empty_lines:
self._compat_skip_empty_lines.remove(key)
for l in self._lines:
@ -395,14 +422,16 @@ class INISection(config.ConfigNamespace):
for o in l.contents:
if isinstance(o, LineContainer):
n = o.name
if self._optionxform: n = self._optionxform(n)
if key != n: remaining.append(o)
if self._optionxform:
n = self._optionxform(n)
if key != n:
remaining.append(o)
else:
remaining.append(o)
l.contents = remaining
del self._options[key]
def __iter__(self):
def __iter__(self) -> Iterator[str]:
d = set()
for l in self._lines:
for x in l.contents:
@ -421,26 +450,25 @@ class INISection(config.ConfigNamespace):
d.add(x)
def _new_namespace(self, name):
raise Exception('No sub-sections allowed', name)
raise Exception("No sub-sections allowed", name)
def make_comment(line):
return CommentLine(line.rstrip('\n'))
def make_comment(line: str) -> CommentLine:
return CommentLine(line.rstrip("\n"))
def readline_iterator(f):
"""iterate over a file by only using the file object's readline method"""
have_newline = False
def readline_iterator(f: TextIO) -> Iterator[str]:
"""Iterate over a file by only using the file object's readline method."""
have_newline: bool = False
while True:
line = f.readline()
line: Optional[str] = f.readline()
if not line:
if have_newline:
yield ""
return
if line.endswith('\n'):
if line.endswith("\n"):
have_newline = True
else:
have_newline = False
@ -448,57 +476,67 @@ def readline_iterator(f):
yield line
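The iterator's contract, sketched: a file that ends with a newline yields one extra empty string, which the parser below turns into a trailing EmptyLine so files round-trip exactly.

from io import StringIO

print(list(readline_iterator(StringIO("a = 1\nb = 2\n"))))  # ['a = 1\n', 'b = 2\n', '']
print(list(readline_iterator(StringIO("a = 1\nb = 2"))))    # ['a = 1\n', 'b = 2']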
def lower(x):
def lower(x: str) -> str:
return x.lower()
class INIConfig(config.ConfigNamespace):
_data = None
_sections = None
_defaults = None
_optionxformvalue = None
_optionxformsource = None
_sectionxformvalue = None
_sectionxformsource = None
_data: LineContainer = None
_sections: Dict[str, object] = None
_defaults: INISection = None
_optionxformvalue: Callable = None
_optionxformsource: Optional["INIConfig"] = None
_sectionxformvalue: Optional["INIConfig"] = None
_sectionxformsource: Optional["INIConfig"] = None
_parse_exc = None
_bom = False
def __init__(self, fp=None, defaults=None, parse_exc=True,
optionxformvalue=lower, optionxformsource=None,
sectionxformvalue=None, sectionxformsource=None):
def __init__(
self,
fp: TextIO = None,
defaults: Dict[str, object] = None,
parse_exc: bool = True,
optionxformvalue: Callable = lower,
optionxformsource: Optional[Union["INIConfig", "RawConfigParser"]] = None,
sectionxformvalue: Optional["INIConfig"] = None,
sectionxformsource: Optional["INIConfig"] = None,
) -> None:
self._data = LineContainer()
self._parse_exc = parse_exc
self._optionxformvalue = optionxformvalue
self._optionxformsource = optionxformsource
self._sectionxformvalue = sectionxformvalue
self._sectionxformsource = sectionxformsource
self._sections = {}
if defaults is None: defaults = {}
self._sections: Dict[str, INISection] = {}
if defaults is None:
defaults = {}
self._defaults = INISection(LineContainer(), optionxformsource=self)
for name, value in defaults.items():
self._defaults[name] = value
if fp is not None:
self._readfp(fp)
_optionxform = _make_xform_property('_optionxform', 'optionxform')
_sectionxform = _make_xform_property('_sectionxform', 'optionxform')
_optionxform = _make_xform_property("_optionxform", "optionxform")
_sectionxform = _make_xform_property("_sectionxform", "optionxform")
def _getitem(self, key):
def _getitem(self, key: str) -> INISection:
if key == DEFAULTSECT:
return self._defaults
if self._sectionxform: key = self._sectionxform(key)
if self._sectionxform:
key = self._sectionxform(key)
return self._sections[key]
def __setitem__(self, key, value):
raise Exception('Values must be inside sections', key, value)
def __setitem__(self, key: str, value: object):
raise Exception("Values must be inside sections", key, value)
def __delitem__(self, key):
if self._sectionxform: key = self._sectionxform(key)
def __delitem__(self, key: str) -> None:
if self._sectionxform:
key = self._sectionxform(key)
for line in self._sections[key]._lines:
self._data.contents.remove(line)
del self._sections[key]
def __iter__(self):
def __iter__(self) -> Iterator[str]:
d = set()
d.add(DEFAULTSECT)
for x in self._data.contents:
@ -507,35 +545,31 @@ class INIConfig(config.ConfigNamespace):
yield x.name
d.add(x.name)
def _new_namespace(self, name):
def _new_namespace(self, name: str) -> INISection:
if self._data.contents:
self._data.add(EmptyLine())
obj = LineContainer(SectionLine(name))
self._data.add(obj)
if self._sectionxform: name = self._sectionxform(name)
if self._sectionxform:
name = self._sectionxform(name)
if name in self._sections:
ns = self._sections[name]
ns._lines.append(obj)
else:
ns = INISection(obj, defaults=self._defaults,
optionxformsource=self)
ns = INISection(obj, defaults=self._defaults, optionxformsource=self)
self._sections[name] = ns
return ns
def __str__(self):
def __str__(self) -> str:
if self._bom:
fmt = u'\ufeff%s'
fmt = "\ufeff%s"
else:
fmt = '%s'
fmt = "%s"
return fmt % self._data.__str__()
__unicode__ = __str__
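A round-trip sketch of INIConfig, the point of all the line bookkeeping above: update one value and the surrounding comment, separator, and alignment are preserved on output.

from io import StringIO

ini = INIConfig(StringIO("[foo]\nbar1 = qualia   ; keep me\n"))
print(ini.foo.bar1)    # -> qualia
ini.foo.bar1 = "kant"  # change only the value
print(str(ini))        # '; keep me' and the original layout survive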
_line_types = [EmptyLine, CommentLine, SectionLine, OptionLine, ContinuationLine]
_line_types = [EmptyLine, CommentLine,
SectionLine, OptionLine,
ContinuationLine]
def _parse(self, line):
def _parse(self, line: str) -> Any:
for linetype in self._line_types:
lineobj = linetype.parse(line)
if lineobj:
@ -544,7 +578,7 @@ class INIConfig(config.ConfigNamespace):
# can't parse line
return None
def _readfp(self, fp):
def _readfp(self, fp: TextIO) -> None:
cur_section = None
cur_option = None
cur_section_name = None
@ -554,21 +588,20 @@ class INIConfig(config.ConfigNamespace):
try:
fname = fp.name
except AttributeError:
fname = '<???>'
fname = "<???>"
line_count = 0
exc = None
line = None
for line in readline_iterator(fp):
# Check for BOM on first line
if line_count == 0 and isinstance(line, six.text_type):
if line[0] == u'\ufeff':
if line_count == 0 and isinstance(line, str):
if line[0] == "\ufeff":
line = line[1:]
self._bom = True
line_obj = self._parse(line)
line_count += 1
if not cur_section and not isinstance(line_obj, (CommentLine, EmptyLine, SectionLine)):
if self._parse_exc:
raise MissingSectionHeaderError(fname, line_count, line)
@ -588,7 +621,7 @@ class INIConfig(config.ConfigNamespace):
cur_option.extend(pending_lines)
pending_lines = []
if pending_empty_lines:
optobj._compat_skip_empty_lines.add(cur_option_name)
optobj._compat_skip_empty_lines.add(cur_option_name)  # noqa: F821
pending_empty_lines = False
cur_option.add(line_obj)
else:
@ -633,9 +666,7 @@ class INIConfig(config.ConfigNamespace):
else:
cur_section_name = cur_section.name
if cur_section_name not in self._sections:
self._sections[cur_section_name] = \
INISection(cur_section, defaults=self._defaults,
optionxformsource=self)
self._sections[cur_section_name] = INISection(cur_section, defaults=self._defaults, optionxformsource=self)
else:
self._sections[cur_section_name]._lines.append(cur_section)
@ -644,8 +675,11 @@ class INIConfig(config.ConfigNamespace):
if isinstance(line_obj, EmptyLine):
pending_empty_lines = True
if line_obj:
line_obj.line_number = line_count
self._data.extend(pending_lines)
if line and line[-1] == '\n':
if line and line[-1] == "\n":
self._data.add(EmptyLine())
if exc:

View File

@ -1,8 +1,13 @@
from typing import TYPE_CHECKING, List
from . import compat
from .ini import EmptyLine, LineContainer
if TYPE_CHECKING:
from .ini import LineType
def tidy(cfg):
def tidy(cfg: compat.RawConfigParser):
"""Clean up blank lines.
This function makes the configuration look clean and
@ -19,8 +24,7 @@ def tidy(cfg):
if isinstance(cont[i], LineContainer):
tidy_section(cont[i])
i += 1
elif (isinstance(cont[i-1], EmptyLine) and
isinstance(cont[i], EmptyLine)):
elif isinstance(cont[i - 1], EmptyLine) and isinstance(cont[i], EmptyLine):
del cont[i]
else:
i += 1
@ -34,11 +38,11 @@ def tidy(cfg):
cont.append(EmptyLine())
def tidy_section(lc):
cont = lc.contents
i = 1
def tidy_section(lc: "LineContainer"):
cont: List[LineType] = lc.contents
i: int = 1
while i < len(cont):
if isinstance(cont[i-1], EmptyLine) and isinstance(cont[i], EmptyLine):
if isinstance(cont[i - 1], EmptyLine) and isinstance(cont[i], EmptyLine):
del cont[i]
else:
i += 1
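Hedged usage sketch for tidy(); the import path is assumed for this vendored copy, and upstream iniparse accepts a plain INIConfig here as well as the compat wrapper named in the type hint.

from io import StringIO
from iniparse.ini import INIConfig  # assumed import path

cfg = INIConfig(StringIO("[a]\nx = 1\n\n\n\n[b]\ny = 2\n"))
tidy(cfg)        # collapses the run of blank lines to a single one
print(str(cfg))  # ends with exactly one trailing blank line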

View File

@ -1,10 +1,11 @@
[tool.black]
line-length = 140
[tool.ruff]
# Allow lines to be as long as 140.
line-length = 140
indent-width = 4
[tool.ruff.lint]
ignore = ["F401","F403","F405","E402"]
ignore = ["F401","F403","F405","E402","E701","E722","E741"]

View File

@ -5,14 +5,16 @@ from huey import RedisHuey
from tisbackup import tis_backup
huey = RedisHuey('tisbackup', host='localhost')
huey = RedisHuey("tisbackup", host="localhost")
@huey.task()
def run_export_backup(base, config_file, mount_point, backup_sections):
try:
#Log
logger = logging.getLogger('tisbackup')
# Log
logger = logging.getLogger("tisbackup")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
@ -24,24 +26,26 @@ def run_export_backup(base, config_file, mount_point, backup_sections):
backup_sections = backup_sections.split(",")
else:
backup_sections = []
backup = tis_backup(dry_run=False,verbose=True,backup_base_dir=base)
backup = tis_backup(dry_run=False, verbose=True, backup_base_dir=base)
backup.read_ini_file(config_file)
mount_point = mount_point
backup.export_backups(backup_sections,mount_point)
backup.export_backups(backup_sections, mount_point)
except Exception as e:
return(str(e))
return str(e)
finally:
os.system("/bin/umount %s" % mount_point)
os.rmdir(mount_point)
return "ok"
def get_task():
return task
return task
def set_task(my_task):
global task
task = my_task
global task
task = my_task
task = None
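A hedged sketch of driving this task (the paths and section names are placeholders, and a reachable Redis plus a running huey worker are assumed): calling the decorated function enqueues it and returns a huey Result handle.

result = run_export_backup(
    base="/backup",
    config_file="/etc/tis/tisbackup-config.ini",
    mount_point="/mnt/TISBACKUP-1700000000.0",
    backup_sections="srv1_mysql,srv2_rsync",
)
set_task(result)                       # stash the handle for later polling
print(get_task().get(blocking=False))  # None until the worker finishes, then "ok"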

View File

@ -375,10 +375,8 @@ def run_command(cmd, info=""):
def check_mount_disk(partition_name, refresh):
mount_point = check_already_mount(partition_name, refresh)
if not refresh:
mount_point = "/mnt/TISBACKUP-" + str(time.time())
os.mkdir(mount_point)
flash("must mount " + partition_name)
@ -425,7 +423,6 @@ def last_backup():
@app.route("/export_backup")
def export_backup():
raise_error("", "")
backup_dict = read_config()
sections = []